From 830d433472605a42c2ba413975090a3af2ffb3e2 Mon Sep 17 00:00:00 2001 From: Terry <76925935+thao-wish@users.noreply.github.com> Date: Fri, 19 Mar 2021 13:55:51 -0400 Subject: [PATCH 001/181] Add server-side metrics (#236) Signed-off-by: Jia Hao --- examples/prom-statsd-exporter/conf.yaml | 12 ++++ src/metrics/metrics.go | 45 +++++++++++++ src/metrics/utils.go | 11 ++++ src/service_cmd/runner/runner.go | 5 +- test/metrics/metrics_test.go | 84 +++++++++++++++++++++++++ 5 files changed, 156 insertions(+), 1 deletion(-) create mode 100644 src/metrics/metrics.go create mode 100644 src/metrics/utils.go create mode 100644 test/metrics/metrics_test.go diff --git a/examples/prom-statsd-exporter/conf.yaml b/examples/prom-statsd-exporter/conf.yaml index 1e47ef98..d7dfbdb3 100644 --- a/examples/prom-statsd-exporter/conf.yaml +++ b/examples/prom-statsd-exporter/conf.yaml @@ -67,6 +67,18 @@ mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action. labels: err_type: "$1" + - match: "ratelimit_server.*.total_requests" + name: "ratelimit_service_total_requests" + match_metric_type: counter + labels: + grpc_method: "$1" + + - match: "ratelimit_server.*.response_time" + name: "ratelimit_service_response_time_seconds" + timer_type: histogram + labels: + grpc_method: "$1" + - match: "ratelimit.service.config_load_success" name: "ratelimit_service_config_load_success" match_metric_type: counter diff --git a/src/metrics/metrics.go b/src/metrics/metrics.go new file mode 100644 index 00000000..c1eb1109 --- /dev/null +++ b/src/metrics/metrics.go @@ -0,0 +1,45 @@ +package metrics + +import ( + "context" + stats "github.com/lyft/gostats" + "google.golang.org/grpc" + "time" +) + +type serverMetrics struct { + totalRequests stats.Counter + responseTime stats.Timer +} + +// ServerReporter reports server-side metrics for ratelimit gRPC server +type ServerReporter struct { + scope stats.Scope +} + +func newServerMetrics(scope stats.Scope, fullMethod string) *serverMetrics { + _, methodName := splitMethodName(fullMethod) + ret := serverMetrics{} + ret.totalRequests = scope.NewCounter(methodName + ".total_requests") + ret.responseTime = scope.NewTimer(methodName + ".response_time") + return &ret +} + +// NewServerReporter returns a ServerReporter object. +func NewServerReporter(scope stats.Scope) *ServerReporter { + return &ServerReporter{ + scope: scope, + } +} + +// UnaryServerInterceptor is a gRPC server-side interceptor that provides server metrics for Unary RPCs. 
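+// For each call it increments a "<method>.total_requests" counter and records a
+// "<method>.response_time" timer in milliseconds, both scoped to the gRPC method name.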
+func (r *ServerReporter) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + start := time.Now() + s := newServerMetrics(r.scope, info.FullMethod) + s.totalRequests.Inc() + resp, err := handler(ctx, req) + s.responseTime.AddValue(float64(time.Since(start).Milliseconds())) + return resp, err + } +} diff --git a/src/metrics/utils.go b/src/metrics/utils.go new file mode 100644 index 00000000..d32b26a8 --- /dev/null +++ b/src/metrics/utils.go @@ -0,0 +1,11 @@ +package metrics + +import "strings" + +func splitMethodName(fullMethodName string) (string, string) { + fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash + if i := strings.Index(fullMethodName, "/"); i >= 0 { + return fullMethodName[:i], fullMethodName[i+1:] + } + return "unknown", "unknown" +} diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index afa1b144..589f1c5f 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -1,6 +1,7 @@ package runner import ( + "github.com/envoyproxy/ratelimit/src/metrics" "io" "math/rand" "net/http" @@ -91,7 +92,9 @@ func (runner *Runner) Run() { localCache = freecache.NewCache(s.LocalCacheSizeInBytes) } - srv := server.NewServer(s, "ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(nil)) + serverReporter := metrics.NewServerReporter(runner.statsStore.ScopeWithTags("ratelimit_server", s.ExtraTags)) + + srv := server.NewServer(s, "ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(serverReporter.UnaryServerInterceptor())) runner.mu.Lock() runner.srv = srv runner.mu.Unlock() diff --git a/test/metrics/metrics_test.go b/test/metrics/metrics_test.go new file mode 100644 index 00000000..318db259 --- /dev/null +++ b/test/metrics/metrics_test.go @@ -0,0 +1,84 @@ +package metrics + +import ( + "context" + "github.com/envoyproxy/ratelimit/src/metrics" + stats "github.com/lyft/gostats" + statsMock "github.com/lyft/gostats/mock" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" + "testing" + "time" +) + +func TestMetricsInterceptor(t *testing.T) { + mockSink := statsMock.NewSink() + statsStore := stats.NewStore(mockSink, false) + serverReporter := metrics.NewServerReporter(statsStore) + + unaryInfo := &grpc.UnaryServerInfo{ + FullMethod: "TestService/TestMethod", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + time.Sleep(100 * time.Millisecond) + return req, nil + } + + ctx := context.Background() + interceptor := serverReporter.UnaryServerInterceptor() + + var iterations uint64 = 5 + + for i := uint64(0); i < iterations; i++ { + _, err := interceptor(ctx, nil, unaryInfo, handler) + assert.NoError(t, err) + } + + totalRequestsCounter := statsStore.NewCounter("TestMethod.total_requests") + assert.Equal(t, iterations, totalRequestsCounter.Value()) + assert.True(t, mockSink.Timer("TestMethod.response_time") >= float64(iterations*100)) +} + +func TestMetricsInterceptor_Concurrent(t *testing.T) { + mockSink := statsMock.NewSink() + statsStore := stats.NewStore(mockSink, false) + serverReporter := metrics.NewServerReporter(statsStore) + + unaryInfo := &grpc.UnaryServerInfo{ + FullMethod: "TestService/TestMethod", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return req, 
nil + } + + ctx := context.Background() + interceptor := serverReporter.UnaryServerInterceptor() + + var iterations uint64 = 50 + c := make(chan bool) + + go func() { + for i := uint64(0); i < iterations; i++ { + _, err := interceptor(ctx, nil, unaryInfo, handler) + assert.NoError(t, err) + } + c <- true + }() + + go func() { + for i := uint64(0); i < iterations; i++ { + _, err := interceptor(ctx, nil, unaryInfo, handler) + assert.NoError(t, err) + } + c <- true + }() + + for i := 0; i < 2; i++ { + <-c + } + + totalRequestsCounter := statsStore.NewCounter("TestMethod.total_requests") + assert.Equal(t, iterations*2, totalRequestsCounter.Value()) + // verify that timer exists in the sink + assert.NotEqual(t, 0, mockSink.Timer("TestMethod.response_time")) +} From 2fd913487a09c7e4e75677378d34b24e34aeebda Mon Sep 17 00:00:00 2001 From: Kateryna Nezdolii Date: Tue, 23 Mar 2021 18:17:15 +0100 Subject: [PATCH 002/181] Clarify memcache client constraints (#239) Signed-off-by: Kateryna Nezdolii --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 750cdb34..a326d50a 100644 --- a/README.md +++ b/README.md @@ -585,7 +585,7 @@ With memcache mode increments will happen asynchronously, so it's technically po a client to exceed quota briefly if multiple requests happen at exactly the same time. Note that Memcache has a max key length of 250 characters, so operations referencing very long -descriptors will fail. +descriptors will fail. Descriptors sent to Memcache should not contain whitespaces or control characters. # Contact From 5f6cee96b0c1207b5915899aeaa996453c2a4201 Mon Sep 17 00:00:00 2001 From: Kateryna Nezdolii Date: Thu, 25 Mar 2021 19:14:33 +0100 Subject: [PATCH 003/181] Support multiple memcache hosts in memcache backend (#238) Signed-off-by: Kateryna Nezdolii --- README.md | 5 ++++- src/memcached/cache_impl.go | 2 +- src/settings/settings.go | 2 +- test/integration/dump.rdb | Bin 0 -> 981 bytes test/integration/integration_test.go | 25 +++++++++++++++++++------ 5 files changed, 25 insertions(+), 9 deletions(-) create mode 100644 test/integration/dump.rdb diff --git a/README.md b/README.md index a326d50a..95d75b9e 100644 --- a/README.md +++ b/README.md @@ -577,7 +577,7 @@ Experimental Memcache support has been added as an alternative to Redis in v1.5. To configure a Memcache instance use the following environment variables instead of the Redis variables: -1. `MEMCACHE_HOST_PORT=` +1. `MEMCACHE_HOST_PORT=`: a comma separated list of hostname:port pairs for memcache nodes. 1. `BACKEND_TYPE=memcache` 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys @@ -587,6 +587,9 @@ a client to exceed quota briefly if multiple requests happen at exactly the same Note that Memcache has a max key length of 250 characters, so operations referencing very long descriptors will fail. Descriptors sent to Memcache should not contain whitespaces or control characters. +When using multiple memcache nodes in `MEMCACHE_HOST_PORT=`, one should provide the identical list of memcache nodes +to all ratelimiter instances to ensure that a particular cache key is always hashed to the same memcache node. 
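+
+For example, with a hypothetical two-node deployment (the hostnames below are
+illustrative, not defaults), every instance would be started with the same value:
+
+```
+BACKEND_TYPE=memcache
+MEMCACHE_HOST_PORT=memcache-1.internal:11211,memcache-2.internal:11211
+```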
+ # Contact * [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 892565e8..1e7b0b69 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -189,7 +189,7 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand, localCache *freecache.Cache, scope stats.Scope) limiter.RateLimitCache { return NewRateLimitCacheImpl( - CollectStats(memcache.New(s.MemcacheHostPort), scope.Scope("memcache")), + CollectStats(memcache.New(s.MemcacheHostPort...), scope.Scope("memcache")), timeSource, jitterRand, s.ExpirationJitterMaxSeconds, diff --git a/src/settings/settings.go b/src/settings/settings.go index 8468076e..95d233c9 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -58,7 +58,7 @@ type Settings struct { RedisPerSecondPipelineLimit int `envconfig:"REDIS_PERSECOND_PIPELINE_LIMIT" default:"0"` // Memcache settings - MemcacheHostPort string `envconfig:"MEMCACHE_HOST_PORT" default:""` + MemcacheHostPort []string `envconfig:"MEMCACHE_HOST_PORT" default:""` } type Option func(*Settings) diff --git a/test/integration/dump.rdb b/test/integration/dump.rdb new file mode 100644 index 0000000000000000000000000000000000000000..b5814b13a8df2f58767f6a6d7f9cf24e20969ff0 GIT binary patch literal 981 zcmaKry=xRv6vZcK{7Q7vY_TA0qc*bP-uu40V3I-*6=NZYOlEgqaB(*yGaJ!bto;*g zECmZ2%OTiWiiK4mSXo-lKBD8!KxWw)+sFNJ&OPV82ise>cL2a-7h^jZCfye{UY6^$ z4%wyRp*Nf)=|;BPpA5(L@cs8kyV-?<#16V+J3id_=zG~}80~eR$2J`IMmF_Myy@DS zd;NLk@yjL8^KORw(d4O(gJ<>?1xlbc97t#;v?Rlb0+c8@69|y{?&|$3bsRHb7$`tM zg;J0toae$XmkX}b3zK30pBe_Es2`4ky(l`J73q>Y`c`V-oVQ|HOASJ30;!DzrE<+v z8q$_~_r$MrC|S-Jt%VXsb2MB+m2115pQ{B|r>c}TLKw=kP%MX$Se5o2-MzI)vvR{` zns}b-ii7k2(dJwcqESL{i6mj6lMi-#abp%VcQRA;-Ts|YwO#3Ct_{fB_`12-yjioi za Date: Wed, 31 Mar 2021 03:04:50 +0800 Subject: [PATCH 004/181] redis: fix per-second pipeline config bug and update doc (#240) Signed-off-by: Tong Cai --- README.md | 5 +++++ src/redis/cache_impl.go | 2 +- src/settings/settings.go | 41 ++++++++++++++++++++++++---------------- 3 files changed, 31 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 95d75b9e..a584edc0 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,10 @@ - [Loading Configuration](#loading-configuration) - [Log Format](#log-format) - [Request Fields](#request-fields) +- [GRPC Client](#grpc-client) + - [Commandline flags](#commandline-flags) - [Statistics](#statistics) + - [Statistics options](#statistics-options) - [HTTP Port](#http-port) - [/json endpoint](#json-endpoint) - [Debug Port](#debug-port) @@ -544,6 +547,8 @@ If window is zero then implicit pipelining will be disabled. 1. `REDIS_PIPELINE_LIMIT` & `REDIS_PERSECOND_PIPELINE_LIMIT`: sets maximum number of commands that can be pipelined before flushing. If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. +`implicit pipelining` is disabled by default. To enable it, you can use default values [used by radix](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L278) and tune for the optimal value. 
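+
+For example, a starting point that mirrors radix's own defaults (the 150us window comes
+from the radix source linked above; both values should be tuned for your workload):
+
+```
+REDIS_PIPELINE_WINDOW=150us
+REDIS_PIPELINE_LIMIT=0
+```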
+ ## One Redis Instance To configure one Redis instance use the following environment variables: diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 7e619b66..cb444623 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -14,7 +14,7 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, - s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) + s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit) } var otherPool Client otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisType, s.RedisUrl, s.RedisPoolSize, diff --git a/src/settings/settings.go b/src/settings/settings.go index 95d233c9..82b8ebe5 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -39,23 +39,32 @@ type Settings struct { BackendType string `envconfig:"BACKEND_TYPE" default:"redis"` // Redis settings - RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` - RedisType string `envconfig:"REDIS_TYPE" default:"SINGLE"` - RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` - RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` - RedisAuth string `envconfig:"REDIS_AUTH" default:""` - RedisTls bool `envconfig:"REDIS_TLS" default:"false"` - RedisPipelineWindow time.Duration `envconfig:"REDIS_PIPELINE_WINDOW" default:"0"` - RedisPipelineLimit int `envconfig:"REDIS_PIPELINE_LIMIT" default:"0"` - RedisPerSecond bool `envconfig:"REDIS_PERSECOND" default:"false"` - RedisPerSecondSocketType string `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"` - RedisPerSecondType string `envconfig:"REDIS_PERSECOND_TYPE" default:"SINGLE"` - RedisPerSecondUrl string `envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"` - RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"` - RedisPerSecondAuth string `envconfig:"REDIS_PERSECOND_AUTH" default:""` - RedisPerSecondTls bool `envconfig:"REDIS_PERSECOND_TLS" default:"false"` + RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` + RedisType string `envconfig:"REDIS_TYPE" default:"SINGLE"` + RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` + RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` + RedisAuth string `envconfig:"REDIS_AUTH" default:""` + RedisTls bool `envconfig:"REDIS_TLS" default:"false"` + // RedisPipelineWindow sets the duration after which internal pipelines will be flushed. + // If window is zero then implicit pipelining will be disabled. Radix use 150us for the + // default value, see https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L278. + RedisPipelineWindow time.Duration `envconfig:"REDIS_PIPELINE_WINDOW" default:"0"` + // RedisPipelineLimit sets maximum number of commands that can be pipelined before flushing. + // If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. 
RedisPipelineLimit int `envconfig:"REDIS_PIPELINE_LIMIT" default:"0"`
+	RedisPerSecond               bool          `envconfig:"REDIS_PERSECOND" default:"false"`
+	RedisPerSecondSocketType     string        `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"`
+	RedisPerSecondType           string        `envconfig:"REDIS_PERSECOND_TYPE" default:"SINGLE"`
+	RedisPerSecondUrl            string        `envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"`
+	RedisPerSecondPoolSize       int           `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"`
+	RedisPerSecondAuth           string        `envconfig:"REDIS_PERSECOND_AUTH" default:""`
+	RedisPerSecondTls            bool          `envconfig:"REDIS_PERSECOND_TLS" default:"false"`
+	// RedisPerSecondPipelineWindow sets the duration after which internal pipelines will be flushed for per second redis.
+	// See comments of RedisPipelineWindow for details.
 	RedisPerSecondPipelineWindow time.Duration `envconfig:"REDIS_PERSECOND_PIPELINE_WINDOW" default:"0"`
+	// RedisPerSecondPipelineLimit sets maximum number of commands that can be pipelined before flushing for per second redis.
+	// See comments of RedisPipelineLimit for details.
+	RedisPerSecondPipelineLimit int `envconfig:"REDIS_PERSECOND_PIPELINE_LIMIT" default:"0"`
 
 	// Memcache settings
 	MemcacheHostPort []string `envconfig:"MEMCACHE_HOST_PORT" default:""`
 }

From db2f642f33da3d82620b10207af5b9975c35ee67 Mon Sep 17 00:00:00 2001
From: petedmarsh
Date: Tue, 25 May 2021 19:29:18 +0200
Subject: [PATCH 005/181] Add env var to configure max idle connections per
 memcache node (#246)

The underlying memcache client library allows this to be configured, and
currently defaults to a value of 2, see:

https://github.com/bradfitz/gomemcache/blob/master/memcache/memcache.go#L72
https://github.com/bradfitz/gomemcache/blob/master/memcache/memcache.go#L145
https://github.com/bradfitz/gomemcache/blob/master/memcache/memcache.go#L239

This allows this value to be configured by a new environment variable:

MEMCACHE_MAX_IDLE_CONNS

which defaults to 2, matching the default of the underlying library
(preserving the current behaviour).

Signed-off-by: Peter Marsh
---
 README.md                            |  1 +
 src/memcached/cache_impl.go          |  4 +++-
 src/settings/settings.go             |  6 ++++++
 test/integration/integration_test.go | 15 +++++++++++++++
 4 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index a584edc0..c3c74b6f 100644
--- a/README.md
+++ b/README.md
@@ -585,6 +585,7 @@ To configure a Memcache instance use the following environment variables instead
 1. `MEMCACHE_HOST_PORT=`: a comma separated list of hostname:port pairs for memcache nodes.
 1. `BACKEND_TYPE=memcache`
 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys
+1. `MEMCACHE_MAX_IDLE_CONNS=2`: the maximum number of idle TCP connections per memcache node, `2` is the default of the underlying library
 
 With memcache mode increments will happen asynchronously, so it's technically possible for
diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 1e7b0b69..b9ecdd87 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -188,8 +188,10 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand, localCache *freecache.Cache, scope stats.Scope) limiter.RateLimitCache { + var client = memcache.New(s.MemcacheHostPort...) + client.MaxIdleConns = s.MemcacheMaxIdleConns return NewRateLimitCacheImpl( - CollectStats(memcache.New(s.MemcacheHostPort...), scope.Scope("memcache")), + CollectStats(client, scope.Scope("memcache")), timeSource, jitterRand, s.ExpirationJitterMaxSeconds, diff --git a/src/settings/settings.go b/src/settings/settings.go index 82b8ebe5..642d8d75 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -68,6 +68,12 @@ type Settings struct { // Memcache settings MemcacheHostPort []string `envconfig:"MEMCACHE_HOST_PORT" default:""` + // MemcacheMaxIdleConns sets the maximum number of idle TCP connections per memcached node. + // The default is 2 as that is the default of the underlying library. This is the maximum + // number of connections to memcache kept idle in pool, if a connection is needed but none + // are idle a new connection is opened, used and closed and can be left in a time-wait state + // which can result in high CPU usage. + MemcacheMaxIdleConns int `envconfig:"MEMCACHE_MAX_IDLE_CONNS" default:"2"` } type Option func(*Settings) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index da7cfa6d..249d9b2c 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -209,6 +209,21 @@ func TestBasicConfigMemcache(t *testing.T) { }) } +func TestConfigMemcacheWithMaxIdleConns(t *testing.T) { + singleNodePort := []int{6394} + assert := assert.New(t) + common.WithMultiMemcache(t, []common.MemcacheConfig{ + {Port: 6394}, + }, func() { + withDefaultMaxIdleConns := makeSimpleMemcacheSettings(singleNodePort, 0) + assert.Equal(2, withDefaultMaxIdleConns.MemcacheMaxIdleConns) + t.Run("MemcacheWithDefaultMaxIdleConns", testBasicConfig(withDefaultMaxIdleConns)) + withSpecifiedMaxIdleConns := makeSimpleMemcacheSettings(singleNodePort, 0) + withSpecifiedMaxIdleConns.MemcacheMaxIdleConns = 100 + t.Run("MemcacheWithSpecifiedMaxIdleConns", testBasicConfig(withSpecifiedMaxIdleConns)) + }) +} + func TestMultiNodeMemcache(t *testing.T) { multiNodePorts := []int{6494, 6495} common.WithMultiMemcache(t, []common.MemcacheConfig{ From 1f0aec56b6fbeebecd27daf7e97f28efde634ab7 Mon Sep 17 00:00:00 2001 From: devincd <505259926@qq.com> Date: Wed, 26 May 2021 01:32:27 +0800 Subject: [PATCH 006/181] delete unuseful code (#254) Signed-off-by: devincd <505259926@qq.com> --- src/redis/driver_impl.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 18e213f1..f6449ea5 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -59,14 +59,10 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string df := func(network, addr string) (radix.Conn, error) { var dialOpts []radix.DialOpt - var err error if useTls { dialOpts = append(dialOpts, radix.DialUseTLS(&tls.Config{})) } - if err != nil { - return nil, err - } if auth != "" { logger.Warnf("enabling authentication to redis on %s", url) From 83a222a6cd94efe7aa93084937395e8c01a93bcc Mon 
Sep 17 00:00:00 2001 From: petedmarsh Date: Tue, 25 May 2021 19:32:44 +0200 Subject: [PATCH 007/181] Hook up /debug/pprof/trace (#249) Signed-off-by: Peter Marsh --- src/server/server_impl.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/server/server_impl.go b/src/server/server_impl.go index b60d1e32..0afd443a 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -254,6 +254,14 @@ func newServer(s settings.Settings, name string, store stats.Store, localCache * }) }) + // setup trace endpoint + ret.AddDebugHttpEndpoint( + "/debug/pprof/trace", + "trace endpoint", + func(writer http.ResponseWriter, request *http.Request) { + pprof.Trace(writer, request) + }) + // setup debug root ret.debugListener.debugMux.HandleFunc( "/", From bc0e9faec3397a6dad9b27fc983be8a229306ff6 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia <5337253+sunjayBhatia@users.noreply.github.com> Date: Tue, 25 May 2021 13:33:49 -0400 Subject: [PATCH 008/181] GRPC, HTTP, and Debug server listen addresses fully configurable (#252) - servers listen addresses are configurable via environment variable - matches port configurability providing *HOST environment variables Fixes #245 Signed-off-by: Sunjay Bhatia --- README.md | 2 +- src/server/server_impl.go | 30 ++++++++++++++---------------- src/settings/settings.go | 11 +++++++---- 3 files changed, 22 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index c3c74b6f..1715b7ad 100644 --- a/README.md +++ b/README.md @@ -501,7 +501,7 @@ $ curl 0:6070/ /stats: print out stats ``` -You can specify the debug port with the `DEBUG_PORT` environment variable. It defaults to `6070`. +You can specify the debug server address with the `DEBUG_HOST` and `DEBUG_PORT` environment variables. They currently default to `0.0.0.0` and `6070` respectively. 
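+
+For example, to serve the debug endpoints on loopback only (an illustrative hardening
+choice, not a default):
+
+```
+DEBUG_HOST=127.0.0.1
+DEBUG_PORT=6070
+```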
# Local Cache diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 0afd443a..e77df069 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -9,6 +9,7 @@ import ( "net/http/pprof" "path/filepath" "sort" + "strconv" "sync" "os" @@ -39,9 +40,9 @@ type serverDebugListener struct { } type server struct { - port int - grpcPort int - debugPort int + httpAddress string + grpcAddress string + debugAddress string router *mux.Router grpcServer *grpc.Server store stats.Store @@ -114,11 +115,10 @@ func (server *server) GrpcServer() *grpc.Server { func (server *server) Start() { go func() { - addr := fmt.Sprintf(":%d", server.debugPort) - logger.Warnf("Listening for debug on '%s'", addr) + logger.Warnf("Listening for debug on '%s'", server.debugAddress) var err error server.listenerMu.Lock() - server.debugListener.listener, err = reuseport.Listen("tcp", addr) + server.debugListener.listener, err = reuseport.Listen("tcp", server.debugAddress) server.listenerMu.Unlock() if err != nil { @@ -133,9 +133,8 @@ func (server *server) Start() { server.handleGracefulShutdown() - addr := fmt.Sprintf(":%d", server.port) - logger.Warnf("Listening for HTTP on '%s'", addr) - list, err := reuseport.Listen("tcp", addr) + logger.Warnf("Listening for HTTP on '%s'", server.httpAddress) + list, err := reuseport.Listen("tcp", server.httpAddress) if err != nil { logger.Fatalf("Failed to open HTTP listener: '%+v'", err) } @@ -151,9 +150,8 @@ func (server *server) Start() { } func (server *server) startGrpc() { - addr := fmt.Sprintf(":%d", server.grpcPort) - logger.Warnf("Listening for gRPC on '%s'", addr) - lis, err := reuseport.Listen("tcp", addr) + logger.Warnf("Listening for gRPC on '%s'", server.grpcAddress) + lis, err := reuseport.Listen("tcp", server.grpcAddress) if err != nil { logger.Fatalf("Failed to listen for gRPC: %v", err) } @@ -180,10 +178,10 @@ func newServer(s settings.Settings, name string, store stats.Store, localCache * ret := new(server) ret.grpcServer = grpc.NewServer(s.GrpcUnaryInterceptor) - // setup ports - ret.port = s.Port - ret.grpcPort = s.GrpcPort - ret.debugPort = s.DebugPort + // setup listen addresses + ret.httpAddress = net.JoinHostPort(s.Host, strconv.Itoa(s.Port)) + ret.grpcAddress = net.JoinHostPort(s.GrpcHost, strconv.Itoa(s.GrpcPort)) + ret.debugAddress = net.JoinHostPort(s.DebugHost, strconv.Itoa(s.DebugPort)) // setup stats ret.store = store diff --git a/src/settings/settings.go b/src/settings/settings.go index 642d8d75..4c58894b 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -10,10 +10,13 @@ import ( type Settings struct { // runtime options GrpcUnaryInterceptor grpc.ServerOption - // env config - Port int `envconfig:"PORT" default:"8080"` - GrpcPort int `envconfig:"GRPC_PORT" default:"8081"` - DebugPort int `envconfig:"DEBUG_PORT" default:"6070"` + // Server listen address config + Host string `envconfig:"HOST" default:"0.0.0.0"` + Port int `envconfig:"PORT" default:"8080"` + GrpcHost string `envconfig:"GRPC_HOST" default:"0.0.0.0"` + GrpcPort int `envconfig:"GRPC_PORT" default:"8081"` + DebugHost string `envconfig:"DEBUG_HOST" default:"0.0.0.0"` + DebugPort int `envconfig:"DEBUG_PORT" default:"6070"` // Logging settings LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` From 60c8eb07c16fb1df6ce21b4b79b398a806482b68 Mon Sep 17 00:00:00 2001 From: Sunjay Bhatia <5337253+sunjayBhatia@users.noreply.github.com> Date: Tue, 25 May 2021 17:30:00 -0400 Subject: [PATCH 009/181] Fix flaky test TestServiceLegacy (#258) User 
deferred barrier.signal() so panic definitely occurs before we continue on
in test. Config reload uses recover() and increments config load counter,
tests were failing to see config load error counter increment.

Fixes: #256

Signed-off-by: Sunjay Bhatia
---
 test/service/ratelimit_legacy_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go
index a51ddbe9..d90f202e 100644
--- a/test/service/ratelimit_legacy_test.go
+++ b/test/service/ratelimit_legacy_test.go
@@ -121,7 +121,7 @@ func TestServiceLegacy(test *testing.T) {
 	t.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do(
 		func([]config.RateLimitConfigToLoad, stats.Scope) {
-			barrier.signal()
+			defer barrier.signal()
 			panic(config.RateLimitConfigError("load error"))
 		})
 	t.runtimeUpdateCallback <- 1

From 60ba180ec1d357e7726ae5b9ad630967d87cf510 Mon Sep 17 00:00:00 2001
From: petedmarsh
Date: Thu, 27 May 2021 18:54:46 +0200
Subject: [PATCH 010/181] Add support for SRV records for memcache clients
 (#253)

This allows MEMCACHE_SRV to be specified as an SRV record from which
multiple memcache hosts can be resolved. For example:

    MEMCACHE_SRV=_memcache._tcp.mylovelydomain.com

This can be used instead of MEMCACHE_HOST_PORT. This will then be resolved
and whatever set of servers it represents will be used as the set of
memcache servers to connect to.

At this stage neither priority nor weight is supported, though weight could
fairly straightforwardly be supported in future.

The SRV can be polled periodically for new servers by setting the following
env var (with 0 meaning "never check"):

    MEMCACHE_SRV_REFRESH=600s # supports standard go durations

Signed-off-by: Peter Marsh
---
 README.md                         |  4 +-
 src/memcached/cache_impl.go       | 70 +++++++++++++++++++++++++++++--
 src/memcached/client.go           |  7 ++++
 src/settings/settings.go          |  4 +-
 src/srv/srv.go                    | 49 ++++++++++++++++++++++
 test/memcached/cache_impl_test.go | 36 ++++++++++++++++
 test/srv/srv_test.go              | 56 +++++++++++++++++++++++++
 7 files changed, 221 insertions(+), 5 deletions(-)
 create mode 100644 src/srv/srv.go
 create mode 100644 test/srv/srv_test.go

diff --git a/README.md b/README.md
index 1715b7ad..6e4cdee5 100644
--- a/README.md
+++ b/README.md
@@ -582,7 +582,9 @@ Experimental Memcache support has been added as an alternative to Redis in v1.5.
 
 To configure a Memcache instance use the following environment variables instead of the Redis variables:
 
-1. `MEMCACHE_HOST_PORT=`: a comma separated list of hostname:port pairs for memcache nodes.
+1. `MEMCACHE_HOST_PORT=`: a comma separated list of hostname:port pairs for memcache nodes (mutually exclusive with `MEMCACHE_SRV`)
+1. `MEMCACHE_SRV=`: an SRV record to look up hosts from (mutually exclusive with `MEMCACHE_HOST_PORT`)
+1. `MEMCACHE_SRV_REFRESH=0`: refresh the list of hosts every n seconds; if 0 no refreshing will happen, supports duration suffixes: "ns", "us" (or "µs"), "ms", "s", "m", "h".
 1. `BACKEND_TYPE=memcache`
 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys
 1. `MEMCACHE_MAX_IDLE_CONNS=2`: the maximum number of idle TCP connections per memcache node, `2` is the default of the underlying library
diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go
index b9ecdd87..de3e095d 100644
--- a/src/memcached/cache_impl.go
+++ b/src/memcached/cache_impl.go
@@ -20,6 +20,7 @@ import (
 	"math/rand"
 	"strconv"
 	"sync"
+	"time"
 
 	"github.com/coocood/freecache"
 	stats "github.com/lyft/gostats"
@@ -33,6 +34,7 @@ import (
 	"github.com/envoyproxy/ratelimit/src/config"
 	"github.com/envoyproxy/ratelimit/src/limiter"
 	"github.com/envoyproxy/ratelimit/src/settings"
+	"github.com/envoyproxy/ratelimit/src/srv"
 	"github.com/envoyproxy/ratelimit/src/utils"
 )
 
@@ -173,6 +175,70 @@ func (this *rateLimitMemcacheImpl) Flush() {
 	this.waitGroup.Wait()
 }
 
+func refreshServersPeriodically(serverList memcache.ServerList, srv string, d time.Duration, finish <-chan struct{}) {
+	t := time.NewTicker(d)
+	defer t.Stop()
+	for {
+		select {
+		case <-t.C:
+			err := refreshServers(serverList, srv)
+			if err != nil {
+				logger.Warn("failed to refresh memcache hosts")
+			} else {
+				logger.Debug("refreshed memcache hosts")
+			}
+		case <-finish:
+			return
+		}
+	}
+}
+
+func refreshServers(serverList memcache.ServerList, srv_ string) error {
+	servers, err := srv.ServerStringsFromSrv(srv_)
+	if err != nil {
+		return err
+	}
+	err = serverList.SetServers(servers...)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func newMemcachedFromSrv(srv_ string, d time.Duration) Client {
+	serverList := new(memcache.ServerList)
+	err := refreshServers(*serverList, srv_)
+	if err != nil {
+		errorText := "Unable to fetch servers from SRV"
+		logger.Errorf(errorText)
+		panic(MemcacheError(errorText))
+	}
+
+	if d > 0 {
+		logger.Infof("refreshing memcache hosts every %v milliseconds", d.Milliseconds())
+		finish := make(chan struct{})
+		go refreshServersPeriodically(*serverList, srv_, d, finish)
+	} else {
+		logger.Debugf("not periodically refreshing memcached hosts")
+	}
+
+	return memcache.NewFromSelector(serverList)
+}
+
+func newMemcacheFromSettings(s settings.Settings) Client {
+	if s.MemcacheSrv != "" && len(s.MemcacheHostPort) > 0 {
+		panic(MemcacheError("Both MEMCACHE_HOST_PORT and MEMCACHE_SRV are set"))
+	}
+	if s.MemcacheSrv != "" {
+		logger.Debugf("Using MEMCACHE_SRV: %v", s.MemcacheSrv)
+		return newMemcachedFromSrv(s.MemcacheSrv, s.MemcacheSrvRefresh)
+	}
+	logger.Debugf("Using MEMCACHE_HOST_PORT: %v", s.MemcacheHostPort)
+	client := memcache.New(s.MemcacheHostPort...)
+	client.MaxIdleConns = s.MemcacheMaxIdleConns
+	return client
+}
+
 func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand,
 	expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
 	return &rateLimitMemcacheImpl{
@@ -188,10 +254,8 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan
 
 func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand,
 	localCache *freecache.Cache, scope stats.Scope) limiter.RateLimitCache {
-	var client = memcache.New(s.MemcacheHostPort...)
-	client.MaxIdleConns = s.MemcacheMaxIdleConns
 	return NewRateLimitCacheImpl(
-		CollectStats(client, scope.Scope("memcache")),
+		CollectStats(newMemcacheFromSettings(s), scope.Scope("memcache")),
 		timeSource,
 		jitterRand,
 		s.ExpirationJitterMaxSeconds,
diff --git a/src/memcached/client.go b/src/memcached/client.go
index 55c0ec31..e8090269 100644
--- a/src/memcached/client.go
+++ b/src/memcached/client.go
@@ -4,6 +4,13 @@ import (
 	"github.com/bradfitz/gomemcache/memcache"
 )
 
+// Errors that may be raised during memcache setup.
+type MemcacheError string
+
+func (e MemcacheError) Error() string {
+	return string(e)
+}
+
 var _ Client = (*memcache.Client)(nil)
 
 // Interface for memcached, used for mocking.
diff --git a/src/settings/settings.go b/src/settings/settings.go
index 4c58894b..2646b5b2 100644
--- a/src/settings/settings.go
+++ b/src/settings/settings.go
@@ -76,7 +76,9 @@ type Settings struct {
 	// number of connections to memcache kept idle in pool, if a connection is needed but none
 	// are idle a new connection is opened, used and closed and can be left in a time-wait state
 	// which can result in high CPU usage.
-	MemcacheMaxIdleConns int `envconfig:"MEMCACHE_MAX_IDLE_CONNS" default:"2"`
+	MemcacheMaxIdleConns int           `envconfig:"MEMCACHE_MAX_IDLE_CONNS" default:"2"`
+	MemcacheSrv          string        `envconfig:"MEMCACHE_SRV" default:""`
+	MemcacheSrvRefresh   time.Duration `envconfig:"MEMCACHE_SRV_REFRESH" default:"0"`
 }
 
 type Option func(*Settings)
diff --git a/src/srv/srv.go b/src/srv/srv.go
new file mode 100644
index 00000000..041ceb95
--- /dev/null
+++ b/src/srv/srv.go
@@ -0,0 +1,49 @@
+package srv
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"regexp"
+
+	logger "github.com/sirupsen/logrus"
+)
+
+var srvRegex = regexp.MustCompile(`^_(.+?)\._(.+?)\.(.+)$`)
+
+func ParseSrv(srv string) (string, string, string, error) {
+	matches := srvRegex.FindStringSubmatch(srv)
+	if matches == nil {
+		errorText := fmt.Sprintf("could not parse %s to SRV parts", srv)
+		logger.Errorf(errorText)
+		return "", "", "", errors.New(errorText)
+	}
+	return matches[1], matches[2], matches[3], nil
+}
+
+func ServerStringsFromSrv(srv string) ([]string, error) {
+	service, proto, name, err := ParseSrv(srv)
+
+	if err != nil {
+		logger.Errorf("failed to parse SRV: %s", err)
+		return nil, err
+	}
+
+	_, srvs, err := net.LookupSRV(service, proto, name)
+
+	if err != nil {
+		logger.Errorf("failed to lookup SRV: %s", err)
+		return nil, err
+	}
+
+	logger.Debugf("found %v server(s) from SRV", len(srvs))
+
+	serversFromSrv := make([]string, len(srvs))
+	for i, srv := range srvs {
+		server := fmt.Sprintf("%s:%v", srv.Target, srv.Port)
+		logger.Debugf("server from srv[%v]: %s", i, server)
+		serversFromSrv[i] = server
+	}
+
+	return serversFromSrv, nil
+}
diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go
index 1e2ba8d7..652ac7c7 100644
--- a/test/memcached/cache_impl_test.go
+++ b/test/memcached/cache_impl_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/envoyproxy/ratelimit/src/config"
 	"github.com/envoyproxy/ratelimit/src/limiter"
 	"github.com/envoyproxy/ratelimit/src/memcached"
+	"github.com/envoyproxy/ratelimit/src/settings"
 	"github.com/envoyproxy/ratelimit/src/utils"
 
 	stats "github.com/lyft/gostats"
@@ -583,6 +584,41 @@ func TestMemcacheAdd(t *testing.T) {
 	cache.Flush()
 }
 
+func TestNewRateLimitCacheImplFromSettingsWhenSrvCannotBeResolved(t *testing.T) {
+	assert := assert.New(t)
+	controller := gomock.NewController(t)
+	defer controller.Finish()
+
+	timeSource :=
mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + + var s settings.Settings + s.NearLimitRatio = 0.8 + s.CacheKeyPrefix = "" + s.ExpirationJitterMaxSeconds = 300 + s.MemcacheSrv = "_something._tcp.example.invalid" + + assert.Panics(func() { memcached.NewRateLimitCacheImplFromSettings(s, timeSource, nil, nil, statsStore) }) +} + +func TestNewRateLimitCacheImplFromSettingsWhenHostAndPortAndSrvAreBothSet(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + + var s settings.Settings + s.NearLimitRatio = 0.8 + s.CacheKeyPrefix = "" + s.ExpirationJitterMaxSeconds = 300 + s.MemcacheSrv = "_something._tcp.example.invalid" + s.MemcacheHostPort = []string{"example.org:11211"} + + assert.Panics(func() { memcached.NewRateLimitCacheImplFromSettings(s, timeSource, nil, nil, statsStore) }) +} + func getMultiResult(vals map[string]int) map[string]*memcache.Item { result := make(map[string]*memcache.Item, len(vals)) for k, v := range vals { diff --git a/test/srv/srv_test.go b/test/srv/srv_test.go new file mode 100644 index 00000000..5e3e8f79 --- /dev/null +++ b/test/srv/srv_test.go @@ -0,0 +1,56 @@ +package srv + +import ( + "errors" + "net" + "testing" + + "github.com/envoyproxy/ratelimit/src/srv" + "github.com/stretchr/testify/assert" +) + +func TestParseSrv(t *testing.T) { + service, proto, name, err := srv.ParseSrv("_something._tcp.example.org.") + assert.Equal(t, service, "something") + assert.Equal(t, proto, "tcp") + assert.Equal(t, name, "example.org.") + assert.Nil(t, err) + + service, proto, name, err = srv.ParseSrv("_something-else._udp.example.org") + assert.Equal(t, service, "something-else") + assert.Equal(t, proto, "udp") + assert.Equal(t, name, "example.org") + assert.Nil(t, err) + + _, _, _, err = srv.ParseSrv("example.org") + assert.Equal(t, err, errors.New("could not parse example.org to SRV parts")) +} + +func TestServerStringsFromSrvWhenSrvIsNotWellFormed(t *testing.T) { + _, err := srv.ServerStringsFromSrv("example.org") + assert.Equal(t, err, errors.New("could not parse example.org to SRV parts")) +} + +func TestServerStringsFromSevWhenSrvIsWellFormedButNotLookupable(t *testing.T) { + _, err := srv.ServerStringsFromSrv("_something._tcp.example.invalid") + var e *net.DNSError + if errors.As(err, &e) { + assert.Equal(t, e.Err, "no such host") + assert.Equal(t, e.Name, "_something._tcp.example.invalid") + assert.False(t, e.IsTimeout) + assert.False(t, e.IsTemporary) + assert.True(t, e.IsNotFound) + } else { + t.Fail() + } +} + +func TestServerStrings(t *testing.T) { + // it seems reasonable to think _xmpp-server._tcp.gmail.com will be available for a long time! 
+ servers, err := srv.ServerStringsFromSrv("_xmpp-server._tcp.gmail.com.") + assert.True(t, len(servers) > 0) + for _, s := range servers { + assert.Regexp(t, `^.*xmpp-server.*google.com.:\d+$`, s) + } + assert.Nil(t, err) +} From c0cdd752f8d538d16d6476ea24cfbf2e355cc25c Mon Sep 17 00:00:00 2001 From: Bohdan Storozhuk Date: Fri, 28 May 2021 16:45:49 +0100 Subject: [PATCH 011/181] Reduce short living tasks produced by memcached implementation and move them to goroutines pool (#251) Signed-off-by: bstorozhuk --- src/memcached/cache_impl.go | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index de3e095d..1f39dd36 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -123,7 +123,7 @@ func (this *rateLimitMemcacheImpl) DoLimit( } this.waitGroup.Add(1) - go this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, uint64(hitsAddend)) + runAsync(func() { this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, uint64(hitsAddend)) }) if AutoFlushForIntegrationTests { this.Flush() } @@ -239,6 +239,40 @@ func newMemcacheFromSettings(s settings.Settings) Client { return client } +var taskQueue = make(chan func()) + +func runAsync(task func()) { + select { + case taskQueue <- task: + // submitted, everything is ok + + default: + go func() { + // do the given task + task() + + tasksProcessedWithinOnePeriod := 0 + const tickDuration = 10 * time.Second + tick := time.NewTicker(tickDuration) + defer tick.Stop() + + for { + select { + case t := <-taskQueue: + t() + tasksProcessedWithinOnePeriod++ + case <-tick.C: + if tasksProcessedWithinOnePeriod > 0 { + tasksProcessedWithinOnePeriod = 0 + continue + } + return + } + } + }() + } +} + func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { return &rateLimitMemcacheImpl{ From 6aaad7c3a2ad29522c0605074879bc1f34aba1ed Mon Sep 17 00:00:00 2001 From: Pablo Radnic Date: Wed, 2 Jun 2021 16:34:02 -0300 Subject: [PATCH 012/181] Metric refactor (#242) Signed-off-by: Pablo Radnic --- src/config/config.go | 17 ++---- src/config/config_impl.go | 82 ++++++++++----------------- src/config_check_cmd/main.go | 9 +-- src/limiter/base_limiter.go | 16 +++--- src/memcached/cache_impl.go | 11 ++-- src/redis/cache_impl.go | 4 +- src/redis/fixed_cache_impl.go | 5 +- src/server/server_impl.go | 19 ++++--- src/service/ratelimit.go | 54 +++++------------- src/service/ratelimit_legacy.go | 25 ++------ src/service_cmd/runner/runner.go | 35 ++++++------ src/stats/manager.go | 62 ++++++++++++++++++++ src/stats/manager_impl.go | 64 +++++++++++++++++++++ test/config/config_test.go | 26 +++++---- test/limiter/base_limiter_test.go | 37 +++++++----- test/memcached/cache_impl_test.go | 61 ++++++++++++-------- test/mocks/config/config.go | 4 +- test/mocks/stats/manager.go | 56 ++++++++++++++++++ test/redis/bench_test.go | 10 ++-- test/redis/fixed_cache_impl_test.go | 53 +++++++++-------- test/service/ratelimit_legacy_test.go | 16 +++--- test/service/ratelimit_test.go | 28 +++++---- 22 files changed, 424 insertions(+), 270 deletions(-) create mode 100644 src/stats/manager.go create mode 100644 src/stats/manager_impl.go create mode 100644 test/mocks/stats/manager.go diff --git a/src/config/config.go b/src/config/config.go index 83ed972b..dbaf8968 100644 --- 
a/src/config/config.go +++ b/src/config/config.go @@ -3,7 +3,7 @@ package config import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - stats "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/src/stats" "golang.org/x/net/context" ) @@ -14,19 +14,10 @@ func (e RateLimitConfigError) Error() string { return string(e) } -// Stats for an individual rate limit config entry. -type RateLimitStats struct { - TotalHits stats.Counter - OverLimit stats.Counter - NearLimit stats.Counter - OverLimitWithLocalCache stats.Counter - WithinLimit stats.Counter -} - // Wrapper for an individual rate limit config entry which includes the defined limit and stats. type RateLimit struct { FullKey string - Stats RateLimitStats + Stats stats.RateLimitStats Limit *pb.RateLimitResponse_RateLimit } @@ -53,8 +44,8 @@ type RateLimitConfigToLoad struct { type RateLimitConfigLoader interface { // Load a new configuration from a list of YAML files. // @param configs supplies a list of full YAML files in string form. - // @param statsScope supplies the stats scope to use for limit stats during runtime. + // @param statsManager supplies the statsManager to initialize stats during runtime. // @return a new configuration. // @throws RateLimitConfigError if the configuration could not be created. - Load(configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig + Load(configs []RateLimitConfigToLoad, statsManager stats.Manager) RateLimitConfig } diff --git a/src/config/config_impl.go b/src/config/config_impl.go index b5118f5e..7b48d326 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -6,7 +6,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - stats "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/src/stats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" "gopkg.in/yaml.v2" @@ -39,8 +39,8 @@ type rateLimitDomain struct { } type rateLimitConfigImpl struct { - domains map[string]*rateLimitDomain - statsScope stats.Scope + domains map[string]*rateLimitDomain + statsManager stats.Manager } var validKeys = map[string]bool{ @@ -53,30 +53,15 @@ var validKeys = map[string]bool{ "requests_per_unit": true, } -// Create new rate limit stats for a config entry. -// @param statsScope supplies the owning scope. -// @param key supplies the fully resolved key name of the entry. -// @return new stats. -func newRateLimitStats(statsScope stats.Scope, key string) RateLimitStats { - ret := RateLimitStats{} - ret.TotalHits = statsScope.NewCounter(key + ".total_hits") - ret.OverLimit = statsScope.NewCounter(key + ".over_limit") - ret.NearLimit = statsScope.NewCounter(key + ".near_limit") - ret.OverLimitWithLocalCache = statsScope.NewCounter(key + ".over_limit_with_local_cache") - ret.WithinLimit = statsScope.NewCounter(key + ".within_limit") - return ret -} - // Create a new rate limit config entry. // @param requestsPerUnit supplies the requests per unit of time for the entry. // @param unit supplies the unit of time for the entry. -// @param key supplies the fully resolved key name of the entry. -// @param scope supplies the owning scope. +// @param rlStats supplies the stats structure associated with the RateLimit // @return the new config entry. 
func NewRateLimit( - requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, key string, scope stats.Scope) *RateLimit { + requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats) *RateLimit { - return &RateLimit{FullKey: key, Stats: newRateLimitStats(scope, key), Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}} + return &RateLimit{FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}} } // Dump an individual descriptor for debugging purposes. @@ -104,10 +89,8 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit // @param config supplies the config file that owns the descriptor. // @param parentKey supplies the fully resolved key name that owns this config level. // @param descriptors supplies the YAML descriptors to load. -// @param statsScope supplies the owning scope. -func (this *rateLimitDescriptor) loadDescriptors( - config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, - statsScope stats.Scope) { +// @param statsManager that owns the stats.Scope. +func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, statsManager stats.Manager) { for _, descriptorConfig := range descriptors { if descriptorConfig.Key == "" { @@ -138,8 +121,7 @@ func (this *rateLimitDescriptor) loadDescriptors( } rateLimit = NewRateLimit( - descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), newParentKey, - statsScope) + descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey)) rateLimitDebugString = fmt.Sprintf( " ratelimit={requests_per_unit=%d, unit=%s}", rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit.String()) @@ -148,8 +130,7 @@ func (this *rateLimitDescriptor) loadDescriptors( logger.Debugf( "loading descriptor: key=%s%s", newParentKey, rateLimitDebugString) newDescriptor := &rateLimitDescriptor{map[string]*rateLimitDescriptor{}, rateLimit} - newDescriptor.loadDescriptors( - config, newParentKey+".", descriptorConfig.Descriptors, statsScope) + newDescriptor.loadDescriptors(config, newParentKey+".", descriptorConfig.Descriptors, statsManager) this.descriptors[finalKey] = newDescriptor } } @@ -229,24 +210,10 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) { logger.Debugf("loading domain: %s", root.Domain) newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil}} - newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsScope) + newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsManager) this.domains[root.Domain] = newDomain } -func (this *rateLimitConfigImpl) descriptorToKey(descriptor *pb_struct.RateLimitDescriptor) string { - rateLimitKey := "" - for _, entry := range descriptor.Entries { - if rateLimitKey != "" { - rateLimitKey += "." - } - rateLimitKey += entry.Key - if entry.Value != "" { - rateLimitKey += "_" + entry.Value - } - } - return rateLimitKey -} - func (this *rateLimitConfigImpl) Dump() string { ret := "" for _, domain := range this.domains { @@ -268,13 +235,12 @@ func (this *rateLimitConfigImpl) GetLimit( } if descriptor.GetLimit() != nil { - rateLimitKey := domain + "." 
+ this.descriptorToKey(descriptor) + rateLimitKey := descriptorKey(domain, descriptor) rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit()) rateLimit = NewRateLimit( descriptor.GetLimit().GetRequestsPerUnit(), rateLimitOverrideUnit, - rateLimitKey, - this.statsScope) + this.statsManager.NewStats(rateLimitKey)) return rateLimit } @@ -311,14 +277,28 @@ func (this *rateLimitConfigImpl) GetLimit( return rateLimit } +func descriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string { + rateLimitKey := "" + for _, entry := range descriptor.Entries { + if rateLimitKey != "" { + rateLimitKey += "." + } + rateLimitKey += entry.Key + if entry.Value != "" { + rateLimitKey += "_" + entry.Value + } + } + return domain + "." + rateLimitKey +} + // Create rate limit config from a list of input YAML files. // @param configs specifies a list of YAML files to load. // @param stats supplies the stats scope to use for limit stats during runtime. // @return a new config. func NewRateLimitConfigImpl( - configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig { + configs []RateLimitConfigToLoad, statsManager stats.Manager) RateLimitConfig { - ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsScope} + ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsManager} for _, config := range configs { ret.loadConfig(config) } @@ -329,9 +309,9 @@ func NewRateLimitConfigImpl( type rateLimitConfigLoaderImpl struct{} func (this *rateLimitConfigLoaderImpl) Load( - configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig { + configs []RateLimitConfigToLoad, statsManager stats.Manager) RateLimitConfig { - return NewRateLimitConfigImpl(configs, statsScope) + return NewRateLimitConfigImpl(configs, statsManager) } // @return a new default config loader implementation. 
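As a quick illustration of the key construction in `descriptorKey` above: the stats key
is the domain followed by dot-joined `key` or `key_value` segments. A minimal,
self-contained sketch (the domain and entry values are hypothetical, and plain structs
stand in for the protobuf descriptor types):

```go
package main

import "fmt"

// entry stands in for one key/value pair of a RateLimitDescriptor.
type entry struct{ key, value string }

// descriptorKey mirrors the logic above: "<domain>." plus each entry rendered
// as "key" or "key_value", joined with dots.
func descriptorKey(domain string, entries []entry) string {
	rateLimitKey := ""
	for _, e := range entries {
		if rateLimitKey != "" {
			rateLimitKey += "."
		}
		rateLimitKey += e.key
		if e.value != "" {
			rateLimitKey += "_" + e.value
		}
	}
	return domain + "." + rateLimitKey
}

func main() {
	entries := []entry{{"database", "users"}, {"message_type", ""}}
	// Prints: mongo_cps.database_users.message_type
	fmt.Println(descriptorKey("mongo_cps", entries))
}
```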
diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index f9f3c742..e451694c 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -3,12 +3,14 @@ package main import ( "flag" "fmt" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" "io/ioutil" "os" "path/filepath" "github.com/envoyproxy/ratelimit/src/config" - "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" ) func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { @@ -19,9 +21,8 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { os.Exit(1) } }() - - dummyStats := stats.NewStore(stats.NewNullSink(), false) - config.NewRateLimitConfigImpl(allConfigs, dummyStats) + statsManager := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), settings.NewSettings()) + config.NewRateLimitConfigImpl(allConfigs, statsManager) } func main() { diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 44c2633e..346ff871 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -5,6 +5,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/utils" logger "github.com/sirupsen/logrus" "math" @@ -18,6 +19,7 @@ type BaseRateLimiter struct { cacheKeyGenerator CacheKeyGenerator localCache *freecache.Cache nearLimitRatio float32 + StatsManager stats.Manager } type LimitInfo struct { @@ -89,7 +91,7 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) - checkOverLimitThreshold(limitInfo, hitsAddend) + this.checkOverLimitThreshold(limitInfo, hitsAddend) if this.localCache != nil { // Set the TTL of the local_cache to be the entire duration. @@ -109,14 +111,14 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease) // The limit is OK but we additionally want to know if we are near the limit. - checkNearLimitThreshold(limitInfo, hitsAddend) + this.checkNearLimitThreshold(limitInfo, hitsAddend) limitInfo.limit.Stats.WithinLimit.Add(uint64(hitsAddend)) } return responseDescriptorStatus } func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, - localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *BaseRateLimiter { + localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager) *BaseRateLimiter { return &BaseRateLimiter{ timeSource: timeSource, JitterRand: jitterRand, @@ -124,10 +126,11 @@ func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expira cacheKeyGenerator: NewCacheKeyGenerator(cacheKeyPrefix), localCache: localCache, nearLimitRatio: nearLimitRatio, + StatsManager: statsManager, } } -func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { +func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { // Increase over limit statistics. Because we support += behavior for increasing the limit, we need to // assess if the entire hitsAddend were over the limit. 
That is, if the limit's value before adding the
 		// N hits was over the limit, then all the N hits were over limit.
@@ -140,12 +143,11 @@ func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
 		// If the limit before increase was below the over limit value, then some of the hits were
 		// in the near limit range.
-		limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold -
-			utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)))
+		limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)))
 	}
 }
 
-func checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
+func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) {
 	if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold {
 		// Here we also need to assess which portion of the hitsAddend were in the near limit range.
 		// If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise,
diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go
index 1f39dd36..4b21af33 100644
--- a/src/memcached/cache_impl.go
+++ b/src/memcached/cache_impl.go
@@ -17,13 +17,14 @@ package memcached
 
 import (
 	"context"
+	"github.com/envoyproxy/ratelimit/src/stats"
 	"math/rand"
 	"strconv"
 	"sync"
 	"time"
 
 	"github.com/coocood/freecache"
-	stats "github.com/lyft/gostats"
+	gostats "github.com/lyft/gostats"
 
 	"github.com/bradfitz/gomemcache/memcache"
 
@@ -274,7 +275,7 @@ func runAsync(task func()) {
 }
 
 func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand,
-	expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
+	expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
 	return &rateLimitMemcacheImpl{
 		client:     client,
 		timeSource: timeSource,
@@ -282,19 +283,19 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan
 		expirationJitterMaxSeconds: expirationJitterMaxSeconds,
 		localCache:                 localCache,
 		nearLimitRatio:             nearLimitRatio,
-		baseRateLimiter:            limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix),
+		baseRateLimiter:            limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, statsManager),
 	}
 }
 
 func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand,
-	localCache *freecache.Cache, scope stats.Scope) limiter.RateLimitCache {
+	localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager) limiter.RateLimitCache {
 	return NewRateLimitCacheImpl(
 		CollectStats(newMemcacheFromSettings(s), scope.Scope("memcache")),
 		timeSource,
 		jitterRand,
 		s.ExpirationJitterMaxSeconds,
 		localCache,
-		scope,
+		statsManager,
 		s.NearLimitRatio,
 		s.CacheKeyPrefix,
 	)
diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go
index cb444623..7bf9eafc 100644
--- a/src/redis/cache_impl.go
+++ b/src/redis/cache_impl.go
@@ -1,6 +1,7 @@
 package redis
 
 import (
+	"github.com/envoyproxy/ratelimit/src/stats"
 	"math/rand"
 
 	"github.com/coocood/freecache"
@@ -10,7 +11,7 @@ import (
 	"github.com/envoyproxy/ratelimit/src/utils"
 )
 
-func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache {
+func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, statsManager stats.Manager) limiter.RateLimitCache {
 	var perSecondPool Client
 	if s.RedisPerSecond {
 		perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth,
@@ -29,5 +30,6 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca
 		localCache,
 		s.NearLimitRatio,
 		s.CacheKeyPrefix,
+		statsManager,
 	)
 }
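Both cache backends now take the stats.Manager as a trailing constructor argument instead of wiring per-descriptor stats through a raw gostats scope. The following sketch is not part of the patch; it only illustrates a call site under the new redis signature, mirroring what createLimiter in runner.go does later in this patch. All variable and function names here are illustrative.

// Sketch only: building the redis-backed cache with the new trailing
// stats.Manager parameter.
package sketch

import (
	"math/rand"
	"time"

	"github.com/coocood/freecache"

	"github.com/envoyproxy/ratelimit/src/limiter"
	"github.com/envoyproxy/ratelimit/src/redis"
	"github.com/envoyproxy/ratelimit/src/server"
	"github.com/envoyproxy/ratelimit/src/settings"
	"github.com/envoyproxy/ratelimit/src/stats"
	"github.com/envoyproxy/ratelimit/src/utils"
)

func buildRedisCache(s settings.Settings, srv server.Server, localCache *freecache.Cache, sm stats.Manager) limiter.RateLimitCache {
	return redis.NewRateLimiterCacheImplFromSettings(
		s,
		localCache,
		srv,
		utils.NewTimeSourceImpl(),
		rand.New(utils.NewLockedSource(time.Now().Unix())),
		s.ExpirationJitterMaxSeconds,
		sm, // per-descriptor and service stats now flow through the manager
	)
}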
diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go
index b2b3d3d2..d364f4ea 100644
--- a/src/redis/fixed_cache_impl.go
+++ b/src/redis/fixed_cache_impl.go
@@ -1,6 +1,7 @@
 package redis
 
 import (
+	"github.com/envoyproxy/ratelimit/src/stats"
 	"math/rand"
 
 	"github.com/coocood/freecache"
@@ -107,10 +108,10 @@ func (this *fixedRateLimitCacheImpl) DoLimit(
 
 func (this *fixedRateLimitCacheImpl) Flush() {}
 
 func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource,
-	jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache {
+	jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager) limiter.RateLimitCache {
 	return &fixedRateLimitCacheImpl{
 		client:          client,
 		perSecondClient: perSecondClient,
-		baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix),
+		baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, statsManager),
 	}
 }
diff --git a/src/server/server_impl.go b/src/server/server_impl.go
index e77df069..15305a32 100644
--- a/src/server/server_impl.go
+++ b/src/server/server_impl.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"expvar"
 	"fmt"
+	"github.com/envoyproxy/ratelimit/src/stats"
 	"io"
 	"net/http"
 	"net/http/pprof"
@@ -26,7 +27,7 @@ import (
 	"github.com/gorilla/mux"
 	reuseport "github.com/kavu/go_reuseport"
 	"github.com/lyft/goruntime/loader"
-	stats "github.com/lyft/gostats"
+	gostats "github.com/lyft/gostats"
 	logger "github.com/sirupsen/logrus"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/health"
@@ -45,8 +46,8 @@ type server struct {
 	debugAddress  string
 	router        *mux.Router
 	grpcServer    *grpc.Server
-	store         stats.Store
-	scope         stats.Scope
+	store         gostats.Store
+	scope         gostats.Scope
 	runtime       loader.IFace
 	debugListener serverDebugListener
 	httpServer    *http.Server
@@ -158,7 +159,7 @@ func (server *server) startGrpc() {
 	server.grpcServer.Serve(lis)
 }
 
-func (server *server) Scope() stats.Scope {
+func (server *server) Scope() gostats.Scope {
 	return server.scope
 }
 
@@ -166,11 +167,11 @@ func (server *server) Runtime() loader.IFace {
 	return server.runtime
 }
 
-func NewServer(s settings.Settings, name string, store stats.Store, localCache *freecache.Cache, opts ...settings.Option) Server {
-	return newServer(s, name, store, localCache, opts...)
+func NewServer(s settings.Settings, name string, statsManager stats.Manager, localCache *freecache.Cache, opts ...settings.Option) Server {
+	return newServer(s, name, statsManager, localCache, opts...)
 }
 
-func newServer(s settings.Settings, name string, store stats.Store, localCache *freecache.Cache, opts ...settings.Option) *server {
+func newServer(s settings.Settings, name string, statsManager stats.Manager, localCache *freecache.Cache, opts ...settings.Option) *server {
 	for _, opt := range opts {
 		opt(&s)
 	}
@@ -184,9 +185,9 @@ func newServer(s settings.Settings, name string, store stats.Store, localCache *
 	ret.debugAddress = net.JoinHostPort(s.DebugHost, strconv.Itoa(s.DebugPort))
 
 	// setup stats
-	ret.store = store
+	ret.store = statsManager.GetStatsStore()
 	ret.scope = ret.store.ScopeWithTags(name, s.ExtraTags)
-	ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go")))
+	ret.store.AddStatGenerator(gostats.NewRuntimeStats(ret.scope.Scope("go")))
 	if localCache != nil {
 		ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache")))
 	}
diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go
index 126bb776..4444d2f1 100644
--- a/src/service/ratelimit.go
+++ b/src/service/ratelimit.go
@@ -2,6 +2,7 @@ package ratelimit
 
 import (
 	"fmt"
+	"github.com/envoyproxy/ratelimit/src/stats"
 	"strings"
 	"sync"
 
@@ -11,37 +12,10 @@ import (
 	"github.com/envoyproxy/ratelimit/src/limiter"
 	"github.com/envoyproxy/ratelimit/src/redis"
 	"github.com/lyft/goruntime/loader"
-	stats "github.com/lyft/gostats"
 	logger "github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
 )
 
-type shouldRateLimitStats struct {
-	redisError   stats.Counter
-	serviceError stats.Counter
-}
-
-func newShouldRateLimitStats(scope stats.Scope) shouldRateLimitStats {
-	ret := shouldRateLimitStats{}
-	ret.redisError = scope.NewCounter("redis_error")
-	ret.serviceError = scope.NewCounter("service_error")
-	return ret
-}
-
-type serviceStats struct {
-	configLoadSuccess stats.Counter
-	configLoadError   stats.Counter
-	shouldRateLimit   shouldRateLimitStats
-}
-
-func newServiceStats(scope stats.Scope) serviceStats {
-	ret := serviceStats{}
-	ret.configLoadSuccess = scope.NewCounter("config_load_success")
-	ret.configLoadError = scope.NewCounter("config_load_error")
-	ret.shouldRateLimit = newShouldRateLimitStats(scope.Scope("call.should_rate_limit"))
-	return ret
-}
-
 type RateLimitServiceServer interface {
 	pb.RateLimitServiceServer
 	GetCurrentConfig() config.RateLimitConfig
@@ -55,13 +29,12 @@ type service struct {
 	config             config.RateLimitConfig
 	runtimeUpdateEvent chan int
 	cache              limiter.RateLimitCache
-	stats              serviceStats
-	rlStatsScope       stats.Scope
+	stats              stats.ServiceStats
 	legacy             *legacyService
 	runtimeWatchRoot   bool
 }
 
-func (this *service) reloadConfig() {
+func (this *service) reloadConfig(statsManager stats.Manager) {
 	defer func() {
 		if e := recover(); e != nil {
 			configError, ok := e.(config.RateLimitConfigError)
@@ -69,7 +42,7 @@ func (this *service) reloadConfig() {
 				panic(e)
 			}
 
-			this.stats.configLoadError.Inc()
+			this.stats.ConfigLoadError.Inc()
 			logger.Errorf("error loading new configuration from runtime: %s", configError.Error())
 		}
 	}()
@@ -84,8 +57,8 @@ func (this *service) reloadConfig() {
 		files = append(files, config.RateLimitConfigToLoad{key, snapshot.Get(key)})
 	}
 
-	newConfig := this.configLoader.Load(files, this.rlStatsScope)
-	this.stats.configLoadSuccess.Inc()
+	newConfig := this.configLoader.Load(files, statsManager)
+	this.stats.ConfigLoadSuccess.Inc()
 	this.configLock.Lock()
 	this.config = newConfig
 	this.configLock.Unlock()
@@ -170,12 +143,12 @@ func (this *service) ShouldRateLimit(
 		switch t := err.(type) {
 		case redis.RedisError:
 			{
-				this.stats.shouldRateLimit.redisError.Inc()
+				this.stats.ShouldRateLimit.RedisError.Inc()
 				finalError = t
 			}
 		case serviceError:
 			{
-				this.stats.shouldRateLimit.serviceError.Inc()
+				this.stats.ShouldRateLimit.ServiceError.Inc()
 				finalError = t
 			}
 		default:
@@ -199,7 +172,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig {
 }
 
 func NewService(runtime loader.IFace, cache limiter.RateLimitCache,
-	configLoader config.RateLimitConfigLoader, stats stats.Scope, runtimeWatchRoot bool) RateLimitServiceServer {
+	configLoader config.RateLimitConfigLoader, statsManager stats.Manager, runtimeWatchRoot bool) RateLimitServiceServer {
 
 	newService := &service{
 		runtime:            runtime,
@@ -208,25 +181,24 @@ func NewService(runtime loader.IFace, cache limiter.RateLimitCache,
 		config:             nil,
 		runtimeUpdateEvent: make(chan int),
 		cache:              cache,
-		stats:              newServiceStats(stats),
-		rlStatsScope:       stats.Scope("rate_limit"),
+		stats:              statsManager.NewServiceStats(),
 		runtimeWatchRoot:   runtimeWatchRoot,
 	}
 
 	newService.legacy = &legacyService{
 		s:                          newService,
-		shouldRateLimitLegacyStats: newShouldRateLimitLegacyStats(stats),
+		shouldRateLimitLegacyStats: statsManager.NewShouldRateLimitLegacyStats(),
 	}
 
 	runtime.AddUpdateCallback(newService.runtimeUpdateEvent)
 
-	newService.reloadConfig()
+	newService.reloadConfig(statsManager)
 	go func() {
 		// No exit right now.
 		for {
 			logger.Debugf("waiting for runtime update")
 			<-newService.runtimeUpdateEvent
 			logger.Debugf("got runtime update and reloading config")
-			newService.reloadConfig()
+			newService.reloadConfig(statsManager)
 		}
 	}()
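The private serviceStats and shouldRateLimitStats types move into src/stats as exported structs, so the service simply asks the Manager for pre-built counter bundles. A hedged sketch of the counters involved follows; it is not part of the patch, and it assumes settings.NewSettings from the existing settings package.

// Sketch: the service-level counters the refactored code increments.
package sketch

import (
	gostats "github.com/lyft/gostats"

	"github.com/envoyproxy/ratelimit/src/settings"
	"github.com/envoyproxy/ratelimit/src/stats"
)

func serviceCounters() stats.ServiceStats {
	sm := stats.NewStatManager(gostats.NewDefaultStore(), settings.NewSettings())
	svc := sm.NewServiceStats()
	svc.ConfigLoadSuccess.Inc()          // reloadConfig, happy path
	svc.ConfigLoadError.Inc()            // reloadConfig, recovered RateLimitConfigError
	svc.ShouldRateLimit.RedisError.Inc() // ShouldRateLimit, redis.RedisError case
	return svc
}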
diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go
index 17112675..ac3971e0 100644
--- a/src/service/ratelimit_legacy.go
+++ b/src/service/ratelimit_legacy.go
@@ -5,7 +5,7 @@ import (
 	pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
 	pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
 	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
-	"github.com/lyft/gostats"
+	"github.com/envoyproxy/ratelimit/src/stats"
 	"golang.org/x/net/context"
 )
 
@@ -17,22 +17,7 @@ type RateLimitLegacyServiceServer interface {
 
 // the legacyService receives RateLimitRequests, converts the request, and calls the service's ShouldRateLimit method.
 type legacyService struct {
 	s *service
-	shouldRateLimitLegacyStats shouldRateLimitLegacyStats
-}
-
-type shouldRateLimitLegacyStats struct {
-	reqConversionError   stats.Counter
-	respConversionError  stats.Counter
-	shouldRateLimitError stats.Counter
-}
-
-func newShouldRateLimitLegacyStats(scope stats.Scope) shouldRateLimitLegacyStats {
-	s := scope.Scope("call.should_rate_limit_legacy")
-	return shouldRateLimitLegacyStats{
-		reqConversionError:   s.NewCounter("req_conversion_error"),
-		respConversionError:  s.NewCounter("resp_conversion_error"),
-		shouldRateLimitError: s.NewCounter("should_rate_limit_error"),
-	}
+	shouldRateLimitLegacyStats stats.ShouldRateLimitLegacyStats
 }
 
 func (this *legacyService) ShouldRateLimit(
@@ -41,18 +26,18 @@ func (this *legacyService) ShouldRateLimit(
 
 	request, err := ConvertLegacyRequest(legacyRequest)
 	if err != nil {
-		this.shouldRateLimitLegacyStats.reqConversionError.Inc()
+		this.shouldRateLimitLegacyStats.ReqConversionError.Inc()
 		return nil, err
 	}
 	resp, err := this.s.ShouldRateLimit(ctx, request)
 	if err != nil {
-		this.shouldRateLimitLegacyStats.shouldRateLimitError.Inc()
+		this.shouldRateLimitLegacyStats.ShouldRateLimitError.Inc()
 		return nil, err
 	}
 
 	legacyResponse, err := ConvertResponse(resp)
 	if err != nil {
-		this.shouldRateLimitLegacyStats.respConversionError.Inc()
+		this.shouldRateLimitLegacyStats.RespConversionError.Inc()
 		return nil, err
 	}
diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go
index 589f1c5f..c8fb45e3 100644
--- a/src/service_cmd/runner/runner.go
+++ b/src/service_cmd/runner/runner.go
@@ -2,6 +2,7 @@ package runner
 
 import (
 	"github.com/envoyproxy/ratelimit/src/metrics"
+	"github.com/envoyproxy/ratelimit/src/stats"
 	"io"
 	"math/rand"
 	"net/http"
@@ -9,7 +10,7 @@ import (
 	"sync"
 	"time"
 
-	stats "github.com/lyft/gostats"
+	gostats "github.com/lyft/gostats"
 
 	"github.com/coocood/freecache"
 
@@ -28,24 +29,24 @@ import (
 )
 
 type Runner struct {
-	statsStore stats.Store
-	settings   settings.Settings
-	srv        server.Server
-	mu         sync.Mutex
+	statsManager stats.Manager
+	settings     settings.Settings
+	srv          server.Server
+	mu           sync.Mutex
 }
 
 func NewRunner(s settings.Settings) Runner {
 	return Runner{
-		statsStore: stats.NewDefaultStore(),
-		settings:   s,
+		statsManager: stats.NewStatManager(gostats.NewDefaultStore(), s),
+		settings:     s,
 	}
 }
 
-func (runner *Runner) GetStatsStore() stats.Store {
-	return runner.statsStore
+func (runner *Runner) GetStatsStore() gostats.Store {
+	return runner.statsManager.GetStatsStore()
 }
 
-func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache) limiter.RateLimitCache {
+func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache, statsManager stats.Manager) limiter.RateLimitCache {
 	switch s.BackendType {
 	case "redis", "":
 		return redis.NewRateLimiterCacheImplFromSettings(
@@ -54,14 +55,16 @@ func createLimiter(srv server.Server, s settings.Settings, localCache *freecache
 			srv,
 			utils.NewTimeSourceImpl(),
 			rand.New(utils.NewLockedSource(time.Now().Unix())),
-			s.ExpirationJitterMaxSeconds)
+			s.ExpirationJitterMaxSeconds,
+			statsManager)
 	case "memcache":
 		return memcached.NewRateLimitCacheImplFromSettings(
 			s,
 			utils.NewTimeSourceImpl(),
 			rand.New(utils.NewLockedSource(time.Now().Unix())),
 			localCache,
-			srv.Scope())
+			srv.Scope(),
+			statsManager)
 	default:
 		logger.Fatalf("Invalid setting for BackendType: %s", s.BackendType)
 		panic("This line should not be reachable")
@@ -92,18 +95,18 @@ func (runner *Runner) Run() {
 		localCache = freecache.NewCache(s.LocalCacheSizeInBytes)
 	}
 
-	serverReporter := metrics.NewServerReporter(runner.statsStore.ScopeWithTags("ratelimit_server", s.ExtraTags))
+	serverReporter := metrics.NewServerReporter(runner.statsManager.GetStatsStore().ScopeWithTags("ratelimit_server", s.ExtraTags))
 
-	srv := server.NewServer(s, "ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(serverReporter.UnaryServerInterceptor()))
+	srv := server.NewServer(s, "ratelimit", runner.statsManager, localCache, settings.GrpcUnaryInterceptor(serverReporter.UnaryServerInterceptor()))
 	runner.mu.Lock()
 	runner.srv = srv
 	runner.mu.Unlock()
 
 	service := ratelimit.NewService(
 		srv.Runtime(),
-		createLimiter(srv, s, localCache),
+		createLimiter(srv, s, localCache, runner.statsManager),
 		config.NewRateLimitConfigLoaderImpl(),
-		srv.Scope().Scope("service"),
+		runner.statsManager,
 		s.RuntimeWatchRoot,
 	)
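runner.go now builds a single stats.Manager up front and threads it through the server, the service and the cache. The next two files define that manager. The sketch below is illustrative only (it assumes settings.NewSettings and no ExtraTags); it shows the metric paths the manager yields, which line up with the ratelimit.service.* naming the project already exports.

// Sketch: metric paths produced by the new stats.Manager.
package sketch

import (
	"fmt"

	gostats "github.com/lyft/gostats"

	"github.com/envoyproxy/ratelimit/src/settings"
	"github.com/envoyproxy/ratelimit/src/stats"
)

func metricPaths() {
	// NewStatManager parents everything under "ratelimit.service", so a
	// descriptor counter surfaces as
	//   ratelimit.service.rate_limit.<key>.total_hits
	// and service counters as e.g.
	//   ratelimit.service.config_load_success
	sm := stats.NewStatManager(gostats.NewDefaultStore(), settings.NewSettings())
	rl := sm.NewStats("key_value")
	rl.TotalHits.Inc()
	fmt.Println(rl.GetKey()) // "key_value"
}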
diff --git a/src/stats/manager.go b/src/stats/manager.go
new file mode 100644
index 00000000..a96753b7
--- /dev/null
+++ b/src/stats/manager.go
@@ -0,0 +1,62 @@
+package stats
+
+import stats "github.com/lyft/gostats"
+import gostats "github.com/lyft/gostats"
+
+// Manager is the interface that wraps initialization of stat structures.
+type Manager interface {
+	// NewStats provides a RateLimitStats structure associated with a given descriptorKey.
+	// Multiple calls with the same descriptorKey argument are guaranteed to be equivalent.
+	NewStats(descriptorKey string) RateLimitStats
+	// Initializes a ShouldRateLimitStats structure.
+	// Multiple calls to this method are idempotent.
+	NewShouldRateLimitStats() ShouldRateLimitStats
+	// Initializes a ServiceStats structure.
+	// Multiple calls to this method are idempotent.
+	NewServiceStats() ServiceStats
+	// Initializes a ShouldRateLimitLegacyStats structure.
+	// Multiple calls to this method are idempotent.
+	NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats
+	// Returns the stats.Store wrapped by the Manager.
+	GetStatsStore() stats.Store
+}
+
+type ManagerImpl struct {
+	store                gostats.Store
+	rlStatsScope         gostats.Scope
+	legacyStatsScope     gostats.Scope
+	serviceStatsScope    gostats.Scope
+	shouldRateLimitScope gostats.Scope
+}
+
+// Stats for panic recoveries.
+// Identifies if a recovered panic is a redis.RedisError or a ServiceError.
+type ShouldRateLimitStats struct {
+	RedisError   gostats.Counter
+	ServiceError gostats.Counter
+}
+
+// Stats for server errors.
+// Keeps failure and success metrics.
+type ServiceStats struct {
+	ConfigLoadSuccess gostats.Counter
+	ConfigLoadError   gostats.Counter
+	ShouldRateLimit   ShouldRateLimitStats
+}
+
+// Legacy Stats for ratelimit errors.
+type ShouldRateLimitLegacyStats struct {
+	ReqConversionError   gostats.Counter
+	RespConversionError  gostats.Counter
+	ShouldRateLimitError gostats.Counter
+}
+
+// Stats for an individual rate limit config entry.
+type RateLimitStats struct {
+	Key                     string
+	TotalHits               gostats.Counter
+	OverLimit               gostats.Counter
+	NearLimit               gostats.Counter
+	OverLimitWithLocalCache gostats.Counter
+	WithinLimit             gostats.Counter
+}
diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go
new file mode 100644
index 00000000..48a01b1a
--- /dev/null
+++ b/src/stats/manager_impl.go
@@ -0,0 +1,64 @@
+package stats
+
+import (
+	"github.com/envoyproxy/ratelimit/src/settings"
+	gostats "github.com/lyft/gostats"
+	logger "github.com/sirupsen/logrus"
+)
+
+func NewStatManager(store gostats.Store, settings settings.Settings) *ManagerImpl {
+	serviceScope := store.ScopeWithTags("ratelimit", settings.ExtraTags).Scope("service")
+	return &ManagerImpl{
+		store:                store,
+		rlStatsScope:         serviceScope.Scope("rate_limit"),
+		legacyStatsScope:     serviceScope.Scope("call.should_rate_limit_legacy"),
+		serviceStatsScope:    serviceScope,
+		shouldRateLimitScope: serviceScope.Scope("call.should_rate_limit"),
+	}
+}
+
+func (this *ManagerImpl) GetStatsStore() gostats.Store {
+	return this.store
+}
+
+// Create new rate descriptor stats for a descriptor tuple.
+// @param key supplies the fully resolved descriptor tuple.
+// @return new stats.
+func (this *ManagerImpl) NewStats(key string) RateLimitStats {
+	ret := RateLimitStats{}
+	logger.Debugf("Creating stats for key: '%s'", key)
+	ret.Key = key
+	ret.TotalHits = this.rlStatsScope.NewCounter(key + ".total_hits")
+	ret.OverLimit = this.rlStatsScope.NewCounter(key + ".over_limit")
+	ret.NearLimit = this.rlStatsScope.NewCounter(key + ".near_limit")
+	ret.OverLimitWithLocalCache = this.rlStatsScope.NewCounter(key + ".over_limit_with_local_cache")
+	ret.WithinLimit = this.rlStatsScope.NewCounter(key + ".within_limit")
+	return ret
+}
+
+func (this *ManagerImpl) NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats {
+	return ShouldRateLimitLegacyStats{
+		ReqConversionError:   this.legacyStatsScope.NewCounter("req_conversion_error"),
+		RespConversionError:  this.legacyStatsScope.NewCounter("resp_conversion_error"),
+		ShouldRateLimitError: this.legacyStatsScope.NewCounter("should_rate_limit_error"),
+	}
+}
+
+func (this *ManagerImpl) NewShouldRateLimitStats() ShouldRateLimitStats {
+	ret := ShouldRateLimitStats{}
+	ret.RedisError = this.shouldRateLimitScope.NewCounter("redis_error")
+	ret.ServiceError = this.shouldRateLimitScope.NewCounter("service_error")
+	return ret
+}
+
+func (this *ManagerImpl) NewServiceStats() ServiceStats {
+	ret := ServiceStats{}
+	ret.ConfigLoadSuccess = this.serviceStatsScope.NewCounter("config_load_success")
+	ret.ConfigLoadError = this.serviceStatsScope.NewCounter("config_load_error")
+	ret.ShouldRateLimit = this.NewShouldRateLimitStats()
+	return ret
+}
+
+func (this RateLimitStats) GetKey() string {
+	return this.Key
+}
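The interface comments above promise that repeated initialization calls for the same key are equivalent. That follows from gostats registering stats by name on the store, so two NewStats calls for one descriptor key share a single underlying counter. A small sketch, under the same settings.NewSettings assumption as before:

// Sketch: idempotent stat creation through the Manager.
package sketch

import (
	"fmt"

	gostats "github.com/lyft/gostats"

	"github.com/envoyproxy/ratelimit/src/settings"
	"github.com/envoyproxy/ratelimit/src/stats"
)

func idempotentStats() {
	sm := stats.NewStatManager(gostats.NewDefaultStore(), settings.NewSettings())
	a := sm.NewStats("key_value")
	b := sm.NewStats("key_value")
	a.TotalHits.Inc()
	fmt.Println(b.TotalHits.Value()) // 1: both handles wrap the same counter
}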
diff --git a/test/config/config_test.go b/test/config/config_test.go
index 4a244bce..107b3a43 100644
--- a/test/config/config_test.go
+++ b/test/config/config_test.go
@@ -9,6 +9,7 @@ import (
 	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
 	pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3"
 	"github.com/envoyproxy/ratelimit/src/config"
+	mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"github.com/lyft/gostats"
 	"github.com/stretchr/testify/assert"
 )
@@ -24,7 +25,7 @@ func loadFile(path string) []config.RateLimitConfigToLoad {
 func TestBasicConfig(t *testing.T) {
 	assert := assert.New(t)
 	stats := stats.NewStore(stats.NewNullSink(), false)
-	rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), stats)
+	rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats))
 	rlConfig.Dump()
 	assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{}))
 	assert.Nil(rlConfig.GetLimit(nil, "test-domain", &pb_struct.RateLimitDescriptor{}))
@@ -168,7 +169,7 @@ func TestBasicConfig(t *testing.T) {
 func TestConfigLimitOverride(t *testing.T) {
 	assert := assert.New(t)
 	stats := stats.NewStore(stats.NewNullSink(), false)
-	rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), stats)
+	rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats))
 	rlConfig.Dump()
 	// No matching domain
 	assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{
@@ -261,7 +262,7 @@ func TestEmptyDomain(t *testing.T) {
 		t,
 		func() {
 			config.NewRateLimitConfigImpl(
-				loadFile("empty_domain.yaml"), stats.NewStore(stats.NewNullSink(), false))
+				loadFile("empty_domain.yaml"), mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
 		},
 		"empty_domain.yaml: config file cannot have empty domain")
 }
@@ -272,7 +273,7 @@ func TestDuplicateDomain(t *testing.T) {
 		func() {
 			files := loadFile("basic_config.yaml")
 			files = append(files, loadFile("duplicate_domain.yaml")...)
-			config.NewRateLimitConfigImpl(files, stats.NewStore(stats.NewNullSink(), false))
+			config.NewRateLimitConfigImpl(files, mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
 		},
 		"duplicate_domain.yaml: duplicate domain 'test-domain' in config file")
 }
@@ -283,7 +284,7 @@ func TestEmptyKey(t *testing.T) {
 		func() {
 			config.NewRateLimitConfigImpl(
 				loadFile("empty_key.yaml"),
-				stats.NewStore(stats.NewNullSink(), false))
+				mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
 		},
 		"empty_key.yaml: descriptor has empty key")
 }
@@ -294,7 +295,7 @@ func TestDuplicateKey(t *testing.T) {
 		func() {
 			config.NewRateLimitConfigImpl(
 				loadFile("duplicate_key.yaml"),
-				stats.NewStore(stats.NewNullSink(), false))
+				mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
 		},
 		"duplicate_key.yaml: duplicate descriptor composite key 'test-domain.key1_value1'")
 }
@@ -305,7 +306,7 @@ func TestBadLimitUnit(t *testing.T) {
 		func() {
 			config.NewRateLimitConfigImpl(
 				loadFile("bad_limit_unit.yaml"),
-				stats.NewStore(stats.NewNullSink(), false))
+				mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
 		},
 		"bad_limit_unit.yaml: invalid rate limit unit 'foo'")
 }
@@ -316,7 +317,7 @@ func TestBadYaml(t *testing.T) {
 		func() {
 			config.NewRateLimitConfigImpl(
 				loadFile("bad_yaml.yaml"),
-				stats.NewStore(stats.NewNullSink(), false))
+				mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
 		},
 		"bad_yaml.yaml: error loading config file: yaml: line 2: found unexpected end of stream")
 }
@@ -327,7 +328,7 @@ func TestMisspelledKey(t *testing.T) {
 		func() {
 			config.NewRateLimitConfigImpl(
 				loadFile("misspelled_key.yaml"),
-				stats.NewStore(stats.NewNullSink(), false))
+				mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
 		},
 		"misspelled_key.yaml: config error, unknown key 'ratelimit'")
 
@@ -336,7 +337,8 @@ func TestMisspelledKey(t *testing.T) {
 		func() {
 			config.NewRateLimitConfigImpl(
 				loadFile("misspelled_key2.yaml"),
-				stats.NewStore(stats.NewNullSink(), false))
+				mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
+
 		},
 		"misspelled_key2.yaml: config error, unknown key 'requestsperunit'")
 }
@@ -347,7 +349,7 @@ func TestNonStringKey(t *testing.T) {
 		func() {
 			config.NewRateLimitConfigImpl(
 				loadFile("non_string_key.yaml"),
-				stats.NewStore(stats.NewNullSink(), false))
+				mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
 		},
 		"non_string_key.yaml: config error, key is not of type string: 0.25")
 }
@@ -358,7 +360,7 @@ func TestNonMapList(t *testing.T) {
 		func() {
 			config.NewRateLimitConfigImpl(
 				loadFile("non_map_list.yaml"),
-				stats.NewStore(stats.NewNullSink(), false))
+				mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
 		},
 		"non_map_list.yaml: config error, yaml file contains list of type other than map: a")
 }
diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go
index 41aa0e5b..e1b8167c 100644
--- a/test/limiter/base_limiter_test.go
+++ b/test/limiter/base_limiter_test.go
@@ -1,6 +1,7 @@
 package limiter
 
 import (
+	mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"math/rand"
 	"testing"
 
@@ -22,10 +23,11 @@ func TestGenerateCacheKeys(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	jitterSource := mock_utils.NewMockJitterRandSource(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
+	sm := mockstats.NewMockStatManager(statsStore)
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "")
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm)
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value())
 	cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1)
 	assert.Equal(1, len(cacheKeys))
@@ -40,10 +42,11 @@ func TestGenerateCacheKeysPrefix(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	jitterSource := mock_utils.NewMockJitterRandSource(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
+	sm := mockstats.NewMockStatManager(statsStore)
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:")
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm)
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value())
 	cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1)
 	assert.Equal(1, len(cacheKeys))
@@ -57,7 +60,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	defer controller.Finish()
 	localCache := freecache.NewCache(100)
 	localCache.Set([]byte("key"), []byte("value"), 100)
-	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "")
+	sm := mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
+	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm)
 	// Returns true, as local cache contains over limit value for the key.
 	assert.Equal(true, baseRateLimit.IsOverLimitWithLocalCache("key"))
 }
@@ -66,11 +70,12 @@ func TestNoOverLimitWithLocalCache(t *testing.T) {
 	assert := assert.New(t)
 	controller := gomock.NewController(t)
 	defer controller.Finish()
-	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "")
+	sm := mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
+	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm)
 	// Returns false, as local cache is nil.
 	assert.Equal(false, baseRateLimit.IsOverLimitWithLocalCache("domain_key_value_1234"))
 	localCache := freecache.NewCache(100)
-	baseRateLimitWithLocalCache := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "")
+	baseRateLimitWithLocalCache := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8, "", sm)
 	// Returns false, as local cache does not contain value for cache key.
 	assert.Equal(false, baseRateLimitWithLocalCache.IsOverLimitWithLocalCache("domain_key_value_1234"))
 }
@@ -79,7 +84,8 @@ func TestGetResponseStatusEmptyKey(t *testing.T) {
 	assert := assert.New(t)
 	controller := gomock.NewController(t)
 	defer controller.Finish()
-	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "")
+	sm := mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))
+	baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8, "", sm)
 	responseStatus := baseRateLimit.GetResponseDescriptorStatus("", nil, false, 1)
 	assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode())
 	assert.Equal(uint32(0), responseStatus.GetLimitRemaining())
@@ -92,8 +98,9 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "")
-	limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	sm := mockstats.NewMockStatManager(statsStore)
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
+	limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5)
 	// As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits.
 	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2)
@@ -112,8 +119,9 @@ func TestGetResponseStatusOverLimit(t *testing.T) {
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
 	localCache := freecache.NewCache(100)
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "")
-	limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	sm := mockstats.NewMockStatManager(statsStore)
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm)
+	limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5)
 	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1)
 	assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode())
@@ -133,8 +141,9 @@ func TestGetResponseStatusBelowLimit(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	timeSource.EXPECT().UnixNow().Return(int64(1234))
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "")
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	sm := mockstats.NewMockStatManager(statsStore)
+	baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm)
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 	limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10)
 	responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1)
 	assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode())
diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go
index 652ac7c7..d663f675 100644
--- a/test/memcached/cache_impl_test.go
+++ b/test/memcached/cache_impl_test.go
@@ -5,6 +5,7 @@
 package memcached_test
 
 import (
+	mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"math/rand"
 	"strconv"
 	"testing"
@@ -35,7 +36,8 @@ func TestMemcached(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	client := mock_memcached.NewMockClient(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "")
+	sm := mockstats.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
 
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 	client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return(
@@ -44,7 +46,7 @@
 	client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -68,7 +70,7 @@ func TestMemcached(t *testing.T) {
 		},
 		1)
 	limits = []*config.RateLimit{
 		nil,
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)}
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
 			{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}},
@@ -98,8 +100,8 @@ func TestMemcached(t *testing.T) {
 		{{"key3", "value3"}, {"subkey3", "subvalue3"}},
 	}, 1)
 	limits = []*config.RateLimit{
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore),
-		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)}
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3")),
+		config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
 			{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)},
@@ -125,7 +127,8 @@ func TestMemcachedGetError(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	client := mock_memcached.NewMockClient(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "")
+	sm := mockstats.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
 
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 	client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return(
@@ -134,7 +137,7 @@
 	client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -152,7 +155,7 @@
 	client.EXPECT().Increment("domain_key_value1_1234", uint64(1)).Return(uint64(5), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value1"}}}, 1)
-	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value1", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -211,7 +214,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	localCache := freecache.NewCache(100)
 	sink := &common.TestStatSink{}
 	statsStore := stats.NewStore(sink, true)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, localCache, statsStore, 0.8, "")
+	sm := mockstats.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, localCache, sm, 0.8, "")
 	localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache"))
 
 	// Test Near Limit Stats. Under Near Limit Ratio
@@ -224,7 +228,7 @@
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
 	limits := []*config.RateLimit{
-		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)}
+		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
@@ -307,7 +311,8 @@ func TestNearLimit(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	client := mock_memcached.NewMockClient(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "")
+	sm := mockstats.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
 
 	// Test Near Limit Stats. Under Near Limit Ratio
 	timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3)
@@ -319,7 +324,7 @@
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
 	limits := []*config.RateLimit{
-		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)}
+		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
@@ -372,7 +377,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().Increment("domain_key5_value5_1234", uint64(3)).Return(uint64(5), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -390,7 +395,7 @@
 	client.EXPECT().Increment("domain_key6_value6_1234", uint64(2)).Return(uint64(7), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2)
-	limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -408,7 +413,7 @@
 	client.EXPECT().Increment("domain_key7_value7_1234", uint64(3)).Return(uint64(19), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -426,7 +431,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().Increment("domain_key8_value8_1234", uint64(3)).Return(uint64(22), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -444,7 +449,7 @@
 	client.EXPECT().Increment("domain_key9_value9_1234", uint64(7)).Return(uint64(22), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -462,7 +467,7 @@
 	client.EXPECT().Increment("domain_key10_value10_1234", uint64(3)).Return(uint64(30), nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -484,7 +489,8 @@ func TestMemcacheWithJitter(t *testing.T) {
 	client := mock_memcached.NewMockClient(controller)
 	jitterSource := mock_utils.NewMockJitterRandSource(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, statsStore, 0.8, "")
+	sm := mockstats.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, sm, 0.8, "")
 
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 	jitterSource.EXPECT().Int63().Return(int64(100))
@@ -505,7 +511,7 @@
 	).Return(nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -526,7 +532,8 @@ func TestMemcacheAdd(t *testing.T) {
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	client := mock_memcached.NewMockClient(controller)
 	statsStore := stats.NewStore(stats.NewNullSink(), false)
-	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "")
+	sm := mockstats.NewMockStatManager(statsStore)
+	cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "")
 
 	// Test a race condition with the initial add
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
@@ -547,7 +554,7 @@ func TestMemcacheAdd(t *testing.T) {
 		uint64(2), nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -571,7 +578,7 @@ func TestMemcacheAdd(t *testing.T) {
 	).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1)
-	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -598,7 +605,9 @@ func TestNewRateLimitCacheImplFromSettingsWhenSrvCannotBeResolved(t *testing.T)
 	s.ExpirationJitterMaxSeconds = 300
 	s.MemcacheSrv = "_something._tcp.example.invalid"
 
-	assert.Panics(func() { memcached.NewRateLimitCacheImplFromSettings(s, timeSource, nil, nil, statsStore) })
+	assert.Panics(func() {
+		memcached.NewRateLimitCacheImplFromSettings(s, timeSource, nil, nil, statsStore, mockstats.NewMockStatManager(statsStore))
+	})
 }
 
 func TestNewRateLimitCacheImplFromSettingsWhenHostAndPortAndSrvAreBothSet(t *testing.T) {
@@ -616,7 +625,9 @@
 	s.MemcacheSrv = "_something._tcp.example.invalid"
 	s.MemcacheHostPort = []string{"example.org:11211"}
 
-	assert.Panics(func() { memcached.NewRateLimitCacheImplFromSettings(s, timeSource, nil, nil, statsStore) })
+	assert.Panics(func() {
+		memcached.NewRateLimitCacheImplFromSettings(s, timeSource, nil, nil, statsStore, mockstats.NewMockStatManager(statsStore))
+	})
 }
 
 func getMultiResult(vals map[string]int) map[string]*memcache.Item {
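The test updates above and below all share one pattern: wrap a null-sink store in the mock stat manager, then mint per-descriptor stats from it, so assertions can read counters straight off the limit. A condensed, self-contained sketch of that pattern (MockStatManager is defined below in test/mocks/stats; the test name is illustrative):

// Sketch: the test-side stats pattern this patch applies throughout.
package sketch_test

import (
	"testing"

	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
	gostats "github.com/lyft/gostats"

	"github.com/envoyproxy/ratelimit/src/config"
	mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats"
)

func TestSketchPattern(t *testing.T) {
	statsStore := gostats.NewStore(gostats.NewNullSink(), false)
	sm := mockstats.NewMockStatManager(statsStore)
	limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))
	// The stats handles hang off the limit, so assertions read them directly.
	if limit.Stats.TotalHits.Value() != 0 {
		t.Fatal("expected a fresh counter")
	}
}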
diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go
index 38d5b347..b34328dc 100644
--- a/test/mocks/config/config.go
+++ b/test/mocks/config/config.go
@@ -8,8 +8,8 @@ import (
 	context "context"
 	envoy_extensions_common_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
 	config "github.com/envoyproxy/ratelimit/src/config"
+	stats "github.com/envoyproxy/ratelimit/src/stats"
 	gomock "github.com/golang/mock/gomock"
-	stats "github.com/lyft/gostats"
 	reflect "reflect"
 )
@@ -88,7 +88,7 @@ func (m *MockRateLimitConfigLoader) EXPECT() *MockRateLimitConfigLoaderMockRecor
 }
 
 // Load mocks base method
-func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats.Scope) config.RateLimitConfig {
+func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats.Manager) config.RateLimitConfig {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "Load", arg0, arg1)
 	ret0, _ := ret[0].(config.RateLimitConfig)
diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go
new file mode 100644
index 00000000..52282e72
--- /dev/null
+++ b/test/mocks/stats/manager.go
@@ -0,0 +1,56 @@
+package stats
+
+import (
+	"github.com/envoyproxy/ratelimit/src/stats"
+	gostats "github.com/lyft/gostats"
+	logger "github.com/sirupsen/logrus"
+)
+
+type MockStatManager struct {
+	store gostats.Store
+}
+
+func (m *MockStatManager) GetStatsStore() gostats.Store {
+	return m.store
+}
+
+func (m *MockStatManager) NewShouldRateLimitStats() stats.ShouldRateLimitStats {
+	s := m.store.Scope("call.should_rate_limit")
+	ret := stats.ShouldRateLimitStats{}
+	ret.RedisError = s.NewCounter("redis_error")
+	ret.ServiceError = s.NewCounter("service_error")
+	return ret
+}
+
+func (m *MockStatManager) NewServiceStats() stats.ServiceStats {
+	ret := stats.ServiceStats{}
+	ret.ConfigLoadSuccess = m.store.NewCounter("config_load_success")
+	ret.ConfigLoadError = m.store.NewCounter("config_load_error")
+	ret.ShouldRateLimit = m.NewShouldRateLimitStats()
+	return ret
+}
+
+func (m *MockStatManager) NewShouldRateLimitLegacyStats() stats.ShouldRateLimitLegacyStats {
+	s := m.store.Scope("call.should_rate_limit_legacy")
+	return stats.ShouldRateLimitLegacyStats{
+		ReqConversionError:   s.NewCounter("req_conversion_error"),
+		RespConversionError:  s.NewCounter("resp_conversion_error"),
+		ShouldRateLimitError: s.NewCounter("should_rate_limit_error"),
+	}
+}
+
+func (m *MockStatManager) NewStats(key string) stats.RateLimitStats {
+	ret := stats.RateLimitStats{}
+	logger.Debugf("outputting test gostats %s", key)
+	ret.Key = key
+	ret.TotalHits = m.store.NewCounter(key + ".total_hits")
+	ret.OverLimit = m.store.NewCounter(key + ".over_limit")
+	ret.NearLimit = m.store.NewCounter(key + ".near_limit")
+	ret.OverLimitWithLocalCache = m.store.NewCounter(key + ".over_limit_with_local_cache")
+	ret.WithinLimit = m.store.NewCounter(key + ".within_limit")
+	return ret
+}
+
+func NewMockStatManager(store gostats.Store) stats.Manager {
+	return &MockStatManager{store: store}
+}
diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go
index 6c190ea7..37bca184 100644
--- a/test/redis/bench_test.go
+++ b/test/redis/bench_test.go
@@ -2,6 +2,7 @@ package redis_test
 
 import (
 	"context"
+	"github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"runtime"
 	"testing"
 	"time"
@@ -10,7 +11,7 @@ import (
 	"github.com/envoyproxy/ratelimit/src/config"
 	"github.com/envoyproxy/ratelimit/src/redis"
 	"github.com/envoyproxy/ratelimit/src/utils"
-	stats "github.com/lyft/gostats"
+	gostats "github.com/lyft/gostats"
 
 	"math/rand"
 
@@ -40,13 +41,14 @@ func BenchmarkParallelDoLimit(b *testing.B) {
 
 	mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int) func(*testing.B) {
 		return func(b *testing.B) {
-			statsStore := stats.NewStore(stats.NewNullSink(), false)
+			statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+			sm := stats.NewMockStatManager(statsStore)
 			client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit)
 			defer client.Close()
-			cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "")
+			cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm)
 
 			request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-			limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+			limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 			// wait for the pool to fill up
 			for {
diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go
index 65883f4b..e0723333 100644
--- a/test/redis/fixed_cache_impl_test.go
+++ b/test/redis/fixed_cache_impl_test.go
@@ -1,6 +1,7 @@
 package redis_test
 
 import (
+	"github.com/envoyproxy/ratelimit/test/mocks/stats"
 	"testing"
 
 	"github.com/coocood/freecache"
@@ -11,7 +12,7 @@ import (
 	"github.com/envoyproxy/ratelimit/src/limiter"
 	"github.com/envoyproxy/ratelimit/src/redis"
 	"github.com/envoyproxy/ratelimit/src/utils"
-	stats "github.com/lyft/gostats"
+	gostats "github.com/lyft/gostats"
 
 	"math/rand"
 
@@ -36,17 +37,18 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
 		assert := assert.New(t)
 		controller := gomock.NewController(t)
 		defer controller.Finish()
+		statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+		sm := stats.NewMockStatManager(statsStore)
 		client := mock_redis.NewMockClient(controller)
 		perSecondClient := mock_redis.NewMockClient(controller)
 		timeSource := mock_utils.NewMockTimeSource(controller)
 		var cache limiter.RateLimitCache
 		if usePerSecondRedis {
-			cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "")
+			cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm)
 		} else {
-			cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "")
+			cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm)
 		}
-		statsStore := stats.NewStore(stats.NewNullSink(), false)
 
 		timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 		var clientUsed *mock_redis.MockClient
@@ -61,7 +63,7 @@
 		clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 		request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-		limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+		limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 		assert.Equal(
 			[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -86,7 +88,7 @@
 		}, 1)
 		limits = []*config.RateLimit{
 			nil,
-			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)}
+			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"))}
 
 		assert.Equal(
 			[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
				{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}},
@@ -113,8 +115,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
 			{{"key3", "value3"}, {"subkey3", "subvalue3"}},
 		}, 1)
 		limits = []*config.RateLimit{
-			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore),
-			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)}
+			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3")),
+			config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"))}
 
 		assert.Equal(
 			[]*pb.RateLimitResponse_DescriptorStatus{
				{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)},
@@ -131,7 +133,7 @@
 	}
 }
 
-func testLocalCacheStats(localCacheStats stats.StatGenerator, statsStore stats.Store, sink *common.TestStatSink,
+func testLocalCacheStats(localCacheStats gostats.StatGenerator, statsStore gostats.Store, sink *common.TestStatSink,
 	expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int,
 	expectedEntryCount int) func(*testing.T) {
 	return func(t *testing.T) {
@@ -175,9 +177,10 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	client := mock_redis.NewMockClient(controller)
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	localCache := freecache.NewCache(100)
-	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "")
+	statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+	sm := stats.NewMockStatManager(statsStore)
+	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm)
 	sink := &common.TestStatSink{}
-	statsStore := stats.NewStore(sink, true)
 	localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache"))
 
 	// Test Near Limit Stats. Under Near Limit Ratio
@@ -190,7 +193,7 @@
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
 	limits := []*config.RateLimit{
-		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)}
+		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
@@ -271,8 +274,9 @@ func TestNearLimit(t *testing.T) {
 	client := mock_redis.NewMockClient(controller)
 	timeSource := mock_utils.NewMockTimeSource(controller)
-	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "")
-	statsStore := stats.NewStore(stats.NewNullSink(), false)
+	statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+	sm := stats.NewMockStatManager(statsStore)
+	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm)
 
 	// Test Near Limit Stats. Under Near Limit Ratio
 	timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3)
@@ -284,7 +288,7 @@
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
 	limits := []*config.RateLimit{
-		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)}
+		config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{
@@ -336,7 +340,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -353,7 +357,7 @@
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2)
-	limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -370,7 +374,7 @@
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -387,7 +391,7 @@
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -404,7 +408,7 @@
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7)
-	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -421,7 +425,7 @@ func TestNearLimit(t *testing.T) {
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3)
-	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)}
+	limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
@@ -440,8 +444,9 @@ func TestRedisWithJitter(t *testing.T) {
 	client := mock_redis.NewMockClient(controller)
 	timeSource := mock_utils.NewMockTimeSource(controller)
 	jitterSource := mock_utils.NewMockJitterRandSource(controller)
-	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "")
-	statsStore := stats.NewStore(stats.NewNullSink(), false)
+	statsStore := gostats.NewStore(gostats.NewNullSink(), false)
+	sm := stats.NewMockStatManager(statsStore)
+	cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm)
 
 	timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
 	jitterSource.EXPECT().Int63().Return(int64(100))
@@ -450,7 +455,7 @@
 	client.EXPECT().PipeDo(gomock.Any()).Return(nil)
 
 	request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
-	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}
+	limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))}
 
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}},
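The service-level tests that follow exercise the mock manager end to end; their gomock Do callbacks now take stats.Manager instead of stats.Scope, matching the updated RateLimitConfigLoader.Load signature. As a self-contained illustration of what the mock registers (names illustrative, not part of the patch):

// Sketch: legacy counters registered through the mock manager.
package sketch

import (
	"fmt"

	gostats "github.com/lyft/gostats"

	mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats"
)

func legacyCounters() {
	// The mock manager registers legacy counters under the
	// "call.should_rate_limit_legacy" scope of the wrapped store.
	store := gostats.NewStore(gostats.NewNullSink(), false)
	legacy := mockstats.NewMockStatManager(store).NewShouldRateLimitLegacyStats()
	legacy.ReqConversionError.Inc()
	fmt.Println(legacy.ReqConversionError.Value()) // 1
}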
[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -421,7 +425,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -440,8 +444,9 @@ func TestRedisWithJitter(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "") - statsStore := stats.NewStore(stats.NewNullSink(), false) + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sm := stats.NewMockStatManager(statsStore) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) jitterSource.EXPECT().Int63().Return(int64(100)) @@ -450,7 +455,7 @@ func TestRedisWithJitter(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index d90f202e..0c0a17aa 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -1,6 +1,7 @@ package ratelimit_test import ( + "github.com/envoyproxy/ratelimit/src/stats" "testing" core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -14,7 +15,6 @@ import ( "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/test/common" "github.com/golang/mock/gomock" - "github.com/lyft/gostats" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -80,7 +80,7 @@ func TestServiceLegacy(test *testing.T) { barrier := newBarrier() t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Scope) { barrier.signal() }).Return(t.config) + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) t.runtimeUpdateCallback <- 1 barrier.wait() @@ -93,7 +93,7 @@ func TestServiceLegacy(test *testing.T) { } limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, 
t.statsManager.NewStats("key")), nil} legacyLimits, err := convertRatelimits(limits) if err != nil { @@ -120,7 +120,7 @@ func TestServiceLegacy(test *testing.T) { // Config load failure. t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Scope) { + func([]config.RateLimitConfigToLoad, stats.Manager) { defer barrier.signal() panic(config.RateLimitConfigError("load error")) }) @@ -130,7 +130,7 @@ func TestServiceLegacy(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"))} legacyLimits, err = convertRatelimits(limits) if err != nil { t.assert.FailNow(err.Error()) @@ -193,7 +193,7 @@ func TestCacheErrorLegacy(test *testing.T) { if err != nil { t.assert.FailNow(err.Error()) } - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"))} t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(nil, req, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { @@ -218,10 +218,10 @@ func TestInitialLoadErrorLegacy(test *testing.T) { t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Scope) { + func([]config.RateLimitConfigToLoad, stats.Manager) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true) request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.GetLegacyService().ShouldRateLimit(nil, request) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 12c77926..787fbf2b 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -1,6 +1,7 @@ package ratelimit_test import ( + "github.com/envoyproxy/ratelimit/src/stats" "sync" "testing" @@ -13,8 +14,9 @@ import ( mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" + mock_stats "github.com/envoyproxy/ratelimit/test/mocks/stats" "github.com/golang/mock/gomock" - stats "github.com/lyft/gostats" + gostats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -55,7 +57,8 @@ type rateLimitServiceTestSuite struct { configLoader *mock_config.MockRateLimitConfigLoader config *mock_config.MockRateLimitConfig runtimeUpdateCallback chan<- int - statStore stats.Store + statsManager stats.Manager + statStore gostats.Store } func commonSetup(t *testing.T) rateLimitServiceTestSuite { @@ -67,7 +70,8 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller) ret.configLoader = 
mock_config.NewMockRateLimitConfigLoader(ret.controller) ret.config = mock_config.NewMockRateLimitConfig(ret.controller) - ret.statStore = stats.NewStore(stats.NewNullSink(), false) + ret.statStore = gostats.NewStore(gostats.NewNullSink(), false) + ret.statsManager = mock_stats.NewMockStatManager(ret.statStore) return ret } @@ -82,7 +86,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe this.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Return(this.config) - return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statStore, true) + return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statsManager, true) } func TestService(test *testing.T) { @@ -109,7 +113,7 @@ func TestService(test *testing.T) { barrier := newBarrier() t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Scope) { barrier.signal() }).Return(t.config) + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) t.runtimeUpdateCallback <- 1 barrier.wait() @@ -117,7 +121,7 @@ func TestService(test *testing.T) { request = common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key")), nil} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -139,8 +143,8 @@ func TestService(test *testing.T) { // Config load failure. t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Scope) { - barrier.signal() + func([]config.RateLimitConfigToLoad, stats.Manager) { + defer barrier.signal() panic(config.RateLimitConfigError("load error")) }) t.runtimeUpdateCallback <- 1 @@ -149,7 +153,7 @@ func TestService(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. 
limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"))} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( @@ -201,7 +205,7 @@ func TestCacheError(test *testing.T) { service := t.setupBasicService() request := common.NewRateLimitRequest("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"))} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(nil, request, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { @@ -225,10 +229,10 @@ func TestInitialLoadError(test *testing.T) { t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) t.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Scope) { + func([]config.RateLimitConfigToLoad, stats.Manager) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true) request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.ShouldRateLimit(nil, request) From edab0ecf66e186cf81459368a54990489a2c49e8 Mon Sep 17 00:00:00 2001 From: lmajercak-wish Date: Wed, 7 Jul 2021 17:31:53 -0500 Subject: [PATCH 013/181] Explicit unlimited ratelimits (#261) Signed-off-by: lmajercak-wish --- README.md | 23 ++++++++++++ examples/ratelimit/config/example.yaml | 6 +++ src/config/config.go | 7 ++-- src/config/config_impl.go | 30 +++++++++++---- src/service/ratelimit.go | 52 +++++++++++++++++++------- test/config/basic_config.yaml | 4 ++ test/config/config_test.go | 22 +++++++++++ test/config/unlimited_with_unit.yaml | 7 ++++ test/limiter/base_limiter_test.go | 10 ++--- test/memcached/cache_impl_test.go | 34 ++++++++--------- test/redis/bench_test.go | 2 +- test/redis/fixed_cache_impl_test.go | 26 ++++++------- test/service/ratelimit_legacy_test.go | 6 +-- test/service/ratelimit_test.go | 45 ++++++++++++++++++++-- 14 files changed, 208 insertions(+), 66 deletions(-) create mode 100644 test/config/unlimited_with_unit.yaml diff --git a/README.md b/README.md index 6e4cdee5..9c24a5ad 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ - [Example 2](#example-2) - [Example 3](#example-3) - [Example 4](#example-4) + - [Example 5](#example-5) - [Loading Configuration](#loading-configuration) - [Log Format](#log-format) - [Request Fields](#request-fields) @@ -329,6 +330,28 @@ descriptors: unit: second ``` +#### Example 5 + +We can also define unlimited rate limit descriptors: + +```yaml +domain: internal +descriptors: + - key: ldap + rate_limit: + unlimited: true + + - key: azure + rate_limit: + unit: minute + requests_per_unit: 100 +``` + +For an unlimited descriptor, the request will not be sent to the underlying cache (Redis/Memcached), 
but will be quickly returned locally by the ratelimit instance. +This can be useful for collecting statistics, or if one wants to define a descriptor that has no limit but the client wants to distinguish between such a descriptor and one that does not exist. + +The return value for unlimited descriptors will be an OK status code with the LimitRemaining field set to the MaxUint32 value. + ## Loading Configuration The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors diff --git a/examples/ratelimit/config/example.yaml b/examples/ratelimit/config/example.yaml index 03e2f783..64bacdf0 100644 --- a/examples/ratelimit/config/example.yaml +++ b/examples/ratelimit/config/example.yaml @@ -27,3 +27,9 @@ descriptors: rate_limit: unit: second requests_per_unit: 1 + - key: bay + rate_limit: + unlimited: true + - key: qux + rate_limit: + unlimited: true diff --git a/src/config/config.go b/src/config/config.go index dbaf8968..c7f581e3 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -16,9 +16,10 @@ func (e RateLimitConfigError) Error() string { // Wrapper for an individual rate limit config entry which includes the defined limit and stats. type RateLimit struct { - FullKey string - Stats stats.RateLimitStats - Limit *pb.RateLimitResponse_RateLimit + FullKey string + Stats stats.RateLimitStats + Limit *pb.RateLimitResponse_RateLimit + Unlimited bool } // Interface for interacting with a loaded rate limit config. diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 7b48d326..0664e5b2 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -15,6 +15,7 @@ import ( type yamlRateLimit struct { RequestsPerUnit uint32 `yaml:"requests_per_unit"` Unit string + Unlimited bool `yaml:"unlimited"` } type yamlDescriptor struct { @@ -51,17 +52,19 @@ var validKeys = map[string]bool{ "rate_limit": true, "unit": true, "requests_per_unit": true, + "unlimited": true, } // Create a new rate limit config entry. // @param requestsPerUnit supplies the requests per unit of time for the entry. // @param unit supplies the unit of time for the entry. // @param rlStats supplies the stats structure associated with the RateLimit +// @param unlimited supplies whether the rate limit is unlimited // @return the new config entry. func NewRateLimit( - requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats) *RateLimit { + requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, unlimited bool) *RateLimit { - return &RateLimit{FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}} + return &RateLimit{FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, Unlimited: unlimited} } // Dump an individual descriptor for debugging purposes.
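As a minimal sketch of how the extended constructor and the new `Unlimited` flag fit together (illustrative only, not part of the patch; the `statsManager` parameter and the `"ldap"` key are assumptions for the example):

```go
package example

import (
	"math"

	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"

	"github.com/envoyproxy/ratelimit/src/config"
	"github.com/envoyproxy/ratelimit/src/stats"
)

// unlimitedStatus builds an unlimited config entry and the local response
// the service produces for it, without ever touching Redis/Memcached.
func unlimitedStatus(statsManager stats.Manager) *pb.RateLimitResponse_DescriptorStatus {
	// Unit and requests_per_unit carry no meaning for an unlimited entry,
	// so UNKNOWN and 0 are passed through.
	limit := config.NewRateLimit(0, pb.RateLimitResponse_RateLimit_UNKNOWN, statsManager.NewStats("ldap"), true)

	if limit.Unlimited {
		// Answered locally: OK with LimitRemaining pinned to MaxUint32.
		return &pb.RateLimitResponse_DescriptorStatus{
			Code:           pb.RateLimitResponse_OK,
			LimitRemaining: math.MaxUint32,
		}
	}
	return nil
}
```

The `LimitRemaining: math.MaxUint32` response mirrors what `shouldRateLimitWorker` returns for unlimited descriptors in the service change below.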
@@ -112,19 +115,29 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p var rateLimit *RateLimit = nil var rateLimitDebugString string = "" if descriptorConfig.RateLimit != nil { + unlimited := descriptorConfig.RateLimit.Unlimited + value, present := pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)] - if !present || value == int32(pb.RateLimitResponse_RateLimit_UNKNOWN) { + validUnit := present && value != int32(pb.RateLimitResponse_RateLimit_UNKNOWN) + + if unlimited { + if validUnit { + panic(newRateLimitConfigError( + config, + fmt.Sprintf("should not specify rate limit unit when unlimited"))) + } + } else if !validUnit { panic(newRateLimitConfigError( config, fmt.Sprintf("invalid rate limit unit '%s'", descriptorConfig.RateLimit.Unit))) } rateLimit = NewRateLimit( - descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey)) + descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey), unlimited) rateLimitDebugString = fmt.Sprintf( - " ratelimit={requests_per_unit=%d, unit=%s}", rateLimit.Limit.RequestsPerUnit, - rateLimit.Limit.Unit.String()) + " ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t}", rateLimit.Limit.RequestsPerUnit, + rateLimit.Limit.Unit.String(), rateLimit.Unlimited) } logger.Debugf( @@ -167,6 +180,8 @@ func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]i case string: // int is a leaf type in ratelimit config. No need to keep validating. case int: + // bool is a leaf type in ratelimit config. No need to keep validating. + case bool: // nil case is an incorrectly formed yaml. However, because this function's purpose is to validate // the yaml's keys we don't panic here. 
case nil: @@ -240,7 +255,8 @@ func (this *rateLimitConfigImpl) GetLimit( rateLimit = NewRateLimit( descriptor.GetLimit().GetRequestsPerUnit(), rateLimitOverrideUnit, - this.statsManager.NewStats(rateLimitKey)) + this.statsManager.NewStats(rateLimitKey), + false) return rateLimit } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 4444d2f1..8ac20019 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -3,6 +3,7 @@ package ratelimit import ( "fmt" "github.com/envoyproxy/ratelimit/src/stats" + "math" "strings" "sync" @@ -76,16 +77,13 @@ func checkServiceErr(something bool, msg string) { } } -func (this *service) shouldRateLimitWorker( - ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse { - - checkServiceErr(request.Domain != "", "rate limit domain must not be empty") - checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty") - +func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx context.Context) ([]*config.RateLimit, []bool) { snappedConfig := this.GetCurrentConfig() checkServiceErr(snappedConfig != nil, "no rate limit configuration loaded") limitsToCheck := make([]*config.RateLimit, len(request.Descriptors)) + isUnlimited := make([]bool, len(request.Descriptors)) + for i, descriptor := range request.Descriptors { if logger.IsLevelEnabled(logger.DebugLevel) { var descriptorEntryStrings []string @@ -102,14 +100,33 @@ func (this *service) shouldRateLimitWorker( if limitsToCheck[i] == nil { logger.Debugf("descriptor does not match any limit, no limits applied") } else { - logger.Debugf( - "applying limit: %d requests per %s", - limitsToCheck[i].Limit.RequestsPerUnit, - limitsToCheck[i].Limit.Unit.String(), - ) + if limitsToCheck[i].Unlimited { + logger.Debugf("descriptor is unlimited, not passing to the cache") + } else { + logger.Debugf( + "applying limit: %d requests per %s", + limitsToCheck[i].Limit.RequestsPerUnit, + limitsToCheck[i].Limit.Unit.String(), + ) + } } } + + if limitsToCheck[i] != nil && limitsToCheck[i].Unlimited { + isUnlimited[i] = true + limitsToCheck[i] = nil + } } + return limitsToCheck, isUnlimited +} + +func (this *service) shouldRateLimitWorker( + ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse { + + checkServiceErr(request.Domain != "", "rate limit domain must not be empty") + checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty") + + limitsToCheck, isUnlimited := this.constructLimitsToCheck(request, ctx) responseDescriptorStatuses := this.cache.DoLimit(ctx, request, limitsToCheck) assert.Assert(len(limitsToCheck) == len(responseDescriptorStatuses)) @@ -118,9 +135,16 @@ func (this *service) shouldRateLimitWorker( response.Statuses = make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) finalCode := pb.RateLimitResponse_OK for i, descriptorStatus := range responseDescriptorStatuses { - response.Statuses[i] = descriptorStatus - if descriptorStatus.Code == pb.RateLimitResponse_OVER_LIMIT { - finalCode = descriptorStatus.Code + if isUnlimited[i] { + response.Statuses[i] = &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + LimitRemaining: math.MaxUint32, + } + } else { + response.Statuses[i] = descriptorStatus + if descriptorStatus.Code == pb.RateLimitResponse_OVER_LIMIT { + finalCode = descriptorStatus.Code + } } } diff --git a/test/config/basic_config.yaml b/test/config/basic_config.yaml index c7091d86..33772366 100644 --- 
a/test/config/basic_config.yaml +++ b/test/config/basic_config.yaml @@ -56,3 +56,7 @@ descriptors: rate_limit: unit: day requests_per_unit: 25 + + - key: key6 + rate_limit: + unlimited: true diff --git a/test/config/config_test.go b/test/config/config_test.go index 107b3a43..d6d79c5e 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -164,6 +164,17 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key4.over_limit").Value()) assert.EqualValues(1, stats.NewCounter("test-domain.key4.near_limit").Value()) assert.EqualValues(1, stats.NewCounter("test-domain.key4.within_limit").Value()) + + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key6", Value: "foo"}}, + }) + rl.Stats.TotalHits.Inc() + rl.Stats.WithinLimit.Inc() + assert.True(rl.Unlimited) + assert.EqualValues(1, stats.NewCounter("test-domain.key6.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key6.within_limit").Value()) } func TestConfigLimitOverride(t *testing.T) { @@ -364,3 +375,14 @@ func TestNonMapList(t *testing.T) { }, "non_map_list.yaml: config error, yaml file contains list of type other than map: a") } + +func TestUnlimitedWithRateLimitUnit(t *testing.T) { + expectConfigPanic( + t, + func() { + config.NewRateLimitConfigImpl( + loadFile("unlimited_with_unit.yaml"), + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + }, + "unlimited_with_unit.yaml: should not specify rate limit unit when unlimited") +} diff --git a/test/config/unlimited_with_unit.yaml b/test/config/unlimited_with_unit.yaml new file mode 100644 index 00000000..4b6f8b7f --- /dev/null +++ b/test/config/unlimited_with_unit.yaml @@ -0,0 +1,7 @@ +domain: test-domain +descriptors: + - key: foo + rate_limit: + unlimited: true + unit: day + requests_per_unit: 25 diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index e1b8167c..97fc97db 100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -27,7 +27,7 @@ func TestGenerateCacheKeys(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -46,7 +46,7 @@ func TestGenerateCacheKeysPrefix(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -100,7 
+100,7 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) @@ -121,7 +121,7 @@ func TestGetResponseStatusOverLimit(t *testing.T) { localCache := freecache.NewCache(100) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) @@ -143,7 +143,7 @@ func TestGetResponseStatusBelowLimit(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index d663f675..5eef2c39 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -46,7 +46,7 @@ func TestMemcached(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -70,7 +70,7 @@ func TestMemcached(t *testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"))} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, 
{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, @@ -100,8 +100,8 @@ func TestMemcached(t *testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3")), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"))} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}, @@ -137,7 +137,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -155,7 +155,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value1_1234", uint64(1)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value1"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"))} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -228,7 +228,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -324,7 +324,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -377,7 +377,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key5_value5_1234", uint64(3)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"))} + limits = 
[]*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -395,7 +395,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key6_value6_1234", uint64(2)).Return(uint64(7), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"))} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -413,7 +413,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key7_value7_1234", uint64(3)).Return(uint64(19), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"))} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -431,7 +431,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key8_value8_1234", uint64(3)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"))} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -449,7 +449,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key9_value9_1234", uint64(7)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"))} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -467,7 +467,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key10_value10_1234", uint64(3)).Return(uint64(30), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"))} + limits = []*config.RateLimit{config.NewRateLimit(10, 
pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -511,7 +511,7 @@ func TestMemcacheWithJitter(t *testing.T) { ).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -554,7 +554,7 @@ func TestMemcacheAdd(t *testing.T) { uint64(2), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -578,7 +578,7 @@ func TestMemcacheAdd(t *testing.T) { ).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"))} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 37bca184..145aeff3 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -48,7 +48,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} // wait for the pool to fill up for { diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index e0723333..93dcf885 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -63,7 +63,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(10, 
pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -88,7 +88,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"))} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, @@ -115,8 +115,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3")), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"))} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}, @@ -193,7 +193,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -288,7 +288,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"))} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -340,7 +340,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"))} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -357,7 +357,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"))} + limits = 
[]*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -374,7 +374,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"))} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -391,7 +391,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"))} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -408,7 +408,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"))} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -425,7 +425,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"))} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -455,7 +455,7 @@ func TestRedisWithJitter(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 
5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index 0c0a17aa..77fb31f7 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -93,7 +93,7 @@ func TestServiceLegacy(test *testing.T) { } limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key")), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false), nil} legacyLimits, err := convertRatelimits(limits) if err != nil { @@ -130,7 +130,7 @@ func TestServiceLegacy(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"))} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} legacyLimits, err = convertRatelimits(limits) if err != nil { t.assert.FailNow(err.Error()) @@ -193,7 +193,7 @@ func TestCacheErrorLegacy(test *testing.T) { if err != nil { t.assert.FailNow(err.Error()) } - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(nil, req, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 787fbf2b..64c44ba1 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -2,6 +2,7 @@ package ratelimit_test import ( "github.com/envoyproxy/ratelimit/src/stats" + "math" "sync" "testing" @@ -121,7 +122,7 @@ func TestService(test *testing.T) { request = common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key")), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false), nil} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -153,7 +154,7 @@ func TestService(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. 
limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"))} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( @@ -205,7 +206,7 @@ func TestCacheError(test *testing.T) { service := t.setupBasicService() request := common.NewRateLimitRequest("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"))} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(nil, request, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { @@ -240,3 +241,41 @@ func TestInitialLoadError(test *testing.T) { t.assert.Equal("no rate limit configuration loaded", err.Error()) t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) } + +func TestUnlimited(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + service := t.setupBasicService() + + request := common.NewRateLimitRequest( + "some-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}, {{"baz", "qux"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false), + nil, + config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true)} + t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[1]).Return(limits[1]) + t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[2]).Return(limits[2]) + + // Unlimited descriptors should not hit the cache + expectedCacheLimits := []*config.RateLimit{limits[0], nil, nil} + + t.cache.EXPECT().DoLimit(nil, request, expectedCacheLimits).Return([]*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }) + + response, err := service.ShouldRateLimit(nil, request) + common.AssertProtoEqual( + t.assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: math.MaxUint32}, + }}, + response) + t.assert.Nil(err) +} From 68dceda7bdb74d0488c75ed7246f3f1cd4e2b089 Mon Sep 17 00:00:00 2001 From: devincd <505259926@qq.com> Date: Tue, 27 Jul 2021 23:49:16 +0800 Subject: [PATCH 014/181] use flag.VisitAll function (#270) Signed-off-by: devincd <505259926@qq.com> --- src/client_cmd/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client_cmd/main.go b/src/client_cmd/main.go index 4934b2f9..030229a0 
100644 --- a/src/client_cmd/main.go +++ b/src/client_cmd/main.go @@ -54,9 +54,9 @@ func main() { "descriptor list to query in =,=,... form") flag.Parse() - fmt.Printf("dial string: %s\n", *dialString) - fmt.Printf("domain: %s\n", *domain) - fmt.Printf("descriptors: %s\n", &descriptorsValue) + flag.VisitAll(func(f *flag.Flag) { + fmt.Printf("Flag: --%s=%q\n", f.Name, f.Value) + }) conn, err := grpc.Dial(*dialString, grpc.WithInsecure()) if err != nil { From bd46f11be716d29e7679e6566a674afec3643793 Mon Sep 17 00:00:00 2001 From: Yuki Sawa Date: Wed, 28 Jul 2021 15:21:16 -0700 Subject: [PATCH 015/181] update readme with deprecation, versioning details (#272) Signed-off-by: Yuki Sawa --- README.md | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 9c24a5ad..e1cac35e 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* - [Overview](#overview) +- [Docker Image](#docker-image) - [Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto](#deprecation-of-legacy-ratelimit-proto-and-v2-ratelimit-proto) - [Deprecation Schedule](#deprecation-schedule) - [Building and Testing](#building-and-testing) @@ -47,27 +48,27 @@ applications. Applications request a rate limit decision based on a domain and a reads the configuration from disk via [runtime](https://github.com/lyft/goruntime), composes a cache key, and talks to the Redis cache. A decision is then returned to the caller. +# Docker Image +For every main commit, an image is pushed to [Dockerhub](https://hub.docker.com/r/envoyproxy/ratelimit/tags?page=1&ordering=last_updated). There is currently no versioning (post v1.4.0) and tags are based on commit sha. + # Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto Envoy's data-plane-api defines a ratelimit service proto v3 [rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto). Logically the data-plane-api rls [v3](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) -is equivalent to the rls [v2](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) -However, due to the namespace differences and how gRPC routing works it is not possible to transparently route the -legacy v2 ratelimit requests to the v3 definitions. Therefore, the ratelimit service will upgrade the requests, process them internally as it would -process a v3 ratelimit request, and then downgrade the response to send back to the client. This means that, +is equivalent to the rls [v2](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto). +However, due to the namespace differences and how gRPC routing works it is not possible to transparently route the legacy v2 ratelimit requests to the v3 definitions. +Therefore, the ratelimit service will upgrade the requests, process them internally as it would process a v3 ratelimit request, and then downgrade the response to send back to the client. This means that, for a slight performance hit for clients using the legacy proto, ratelimit is backwards compatible with the legacy proto. -Prior to version 2.0.0 ratelimit service contained a protocol definition that used to be supported in a legacy mode, -but support for it and was removed in 2.0.0. ## Deprecation Schedule 1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. 
Ratelimit has been in production use at Lyft for over 2 years. 2. `v1.1.0` introduces the data-plane-api proto and initiates the deprecation of the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). -3. `v2.0.0` deleted support for the legacy [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). +3. `e91321b` [commit](https://github.com/envoyproxy/ratelimit/commit/e91321b10f1ad7691d0348e880bd75d0fca05758) deleted support for the legacy [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). The current version of the ratelimit protocol is changed to [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) is still supported as a legacy protocol. -4. `v3.0.0` deletes support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) +4. `TODO` deletes support for the legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) # Building and Testing @@ -347,10 +348,10 @@ descriptors: requests_per_unit: 100 ``` -For an unlimited descriptor, the request will not be sent to the underlying cache (Redis/Memcached), but will be quickly returned locally by the ratelimit instance. -This can be useful for collecting statistics, or if one wants to define a descriptor that has no limit but the client wants to distinguish between such descriptor and one that does not exist. +For an unlimited descriptor, the request will not be sent to the underlying cache (Redis/Memcached), but will be quickly returned locally by the ratelimit instance. +This can be useful for collecting statistics, or if one wants to define a descriptor that has no limit but the client wants to distinguish between such a descriptor and one that does not exist. -The return value for unlimited descriptors will be an OK status code with the LimitRemaining field set to MaxUint32 value. +The return value for unlimited descriptors will be an OK status code with the LimitRemaining field set to the MaxUint32 value. ## Loading Configuration @@ -606,7 +607,7 @@ Experimental Memcache support has been added as an alternative to Redis in v1.5. To configure a Memcache instance use the following environment variables instead of the Redis variables: 1. `MEMCACHE_HOST_PORT=`: a comma separated list of hostname:port pairs for memcache nodes (mutually exclusive with `MEMCACHE_SRV`) -1. `MEMCACHE_SRV=`: an SRV record to lookup hosts from (mutually exclusive with `MEMCACHE_HOST_PORT`) +1. `MEMCACHE_SRV=`: an SRV record to look up hosts from (mutually exclusive with `MEMCACHE_HOST_PORT`) 1. `MEMCACHE_SRV_REFRESH=0`: refresh the list of hosts every n seconds, if 0 no refreshing will happen, supports duration suffixes: "ns", "us" (or "µs"), "ms", "s", "m", "h". 1. `BACKEND_TYPE=memcache` 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys With memcache mode increments will happen asynchronously, so it's technically possible for a client to exceed quota briefly if multiple requests happen at exactly the same time.
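To make that race concrete, here is a minimal sketch of the read-then-asynchronous-increment pattern; it is illustrative only (not the service's actual cache code) and assumes the `bradfitz/gomemcache` client:

```go
package example

import (
	"strconv"

	"github.com/bradfitz/gomemcache/memcache"
)

// allowSketch makes its decision on a counter value read before the
// asynchronous increment lands, so two simultaneous callers sitting at the
// boundary of the limit can both be admitted.
func allowSketch(mc *memcache.Client, key string, limit uint64) bool {
	item, err := mc.Get(key)
	if err != nil {
		// For the sketch, treat a missing or unreachable counter as zero hits.
		return true
	}
	hits, _ := strconv.ParseUint(string(item.Value), 10, 64)

	// Fire-and-forget increment; the decision below never sees it.
	go mc.Increment(key, 1)

	return hits < limit
}
```

Because the goroutine's increment may land after both readers have decided, two callers at the boundary can each be admitted once.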
Note that Memcache has a max key length of 250 characters, so operations referencing very long -descriptors will fail. Descriptors sent to Memcache should not contain whitespaces or control characters. +descriptors will fail. Descriptors sent to Memcache should not contain whitespaces or control characters. -When using multiple memcache nodes in `MEMCACHE_HOST_PORT=`, one should provide the identical list of memcache nodes +When using multiple memcache nodes in `MEMCACHE_HOST_PORT=`, one should provide the identical list of memcache nodes to all ratelimiter instances to ensure that a particular cache key is always hashed to the same memcache node. # Contact From 73ad2b35b90f4d3679e06ef9f6e35ed431546fa0 Mon Sep 17 00:00:00 2001 From: devincd <505259926@qq.com> Date: Fri, 30 Jul 2021 01:00:57 +0800 Subject: [PATCH 016/181] Fix bug and code optimization (#273) Signed-off-by: devincd <505259926@qq.com> --- src/redis/cache_impl.go | 6 +++--- src/redis/driver_impl.go | 4 ++-- src/server/server_impl.go | 15 ++++----------- test/redis/bench_test.go | 5 +++-- test/redis/driver_impl_test.go | 8 ++++---- 5 files changed, 16 insertions(+), 22 deletions(-) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 7bf9eafc..22679149 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,24 +1,24 @@ package redis import ( - "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "github.com/coocood/freecache" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/server" "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/utils" ) func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, statsManager stats.Manager) limiter.RateLimitCache { var perSecondPool Client if s.RedisPerSecond { - perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, + perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, s.RedisPerSecondSocketType, s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit) } var otherPool Client - otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisType, s.RedisUrl, s.RedisPoolSize, + otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisSocketType, s.RedisType, s.RedisUrl, s.RedisPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) return NewFixedRateLimitCacheImpl( diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index f6449ea5..302dd522 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -52,7 +52,7 @@ func checkError(err error) { } } -func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string, url string, poolSize int, +func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisType, url string, poolSize int, pipelineWindow time.Duration, pipelineLimit int) Client { logger.Warnf("connecting to redis on %s with pool size %d", url, poolSize) @@ -92,7 +92,7 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string var err error switch strings.ToLower(redisType) { case "single": - client, err = poolFunc("tcp", url) + client, err = 
poolFunc(redisSocketType, url) case "cluster": urls := strings.Split(url, ",") if implicitPipelining == false { diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 15305a32..f7e329d2 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -4,7 +4,6 @@ import ( "bytes" "expvar" "fmt" - "github.com/envoyproxy/ratelimit/src/stats" "io" "net/http" "net/http/pprof" @@ -13,6 +12,8 @@ import ( "strconv" "sync" + "github.com/envoyproxy/ratelimit/src/stats" + "os" "os/signal" "syscall" @@ -283,13 +284,13 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc func (server *server) Stop() { server.grpcServer.GracefulStop() server.listenerMu.Lock() + defer server.listenerMu.Unlock() if server.debugListener.listener != nil { server.debugListener.listener.Close() } if server.httpServer != nil { server.httpServer.Close() } - server.listenerMu.Unlock() } func (server *server) handleGracefulShutdown() { @@ -300,15 +301,7 @@ func (server *server) handleGracefulShutdown() { sig := <-sigs logger.Infof("Ratelimit server received %v, shutting down gracefully", sig) - server.grpcServer.GracefulStop() - server.listenerMu.Lock() - if server.debugListener.listener != nil { - server.debugListener.listener.Close() - } - if server.httpServer != nil { - server.httpServer.Close() - } - server.listenerMu.Unlock() + server.Stop() os.Exit(0) }() } diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 145aeff3..1e3ddd72 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -2,11 +2,12 @@ package redis_test import ( "context" - "github.com/envoyproxy/ratelimit/test/mocks/stats" "runtime" "testing" "time" + "github.com/envoyproxy/ratelimit/test/mocks/stats" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" @@ -43,7 +44,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { return func(b *testing.B) { statsStore := gostats.NewStore(gostats.NewNullSink(), false) sm := stats.NewMockStatManager(statsStore) - client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) + client := redis.NewClientImpl(statsStore, false, "", "tcp", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) defer client.Close() cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm) diff --git a/test/redis/driver_impl_test.go b/test/redis/driver_impl_test.go index ab488e23..0653cc4c 100644 --- a/test/redis/driver_impl_test.go +++ b/test/redis/driver_impl_test.go @@ -6,7 +6,7 @@ import ( "github.com/alicebob/miniredis/v2" "github.com/envoyproxy/ratelimit/src/redis" - "github.com/lyft/gostats" + stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" ) @@ -36,7 +36,7 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(auth, addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, auth, "single", addr, 1, pipelineWindow, pipelineLimit) + return redis.NewClientImpl(statsStore, false, auth, "tcp", "single", addr, 1, pipelineWindow, pipelineLimit) } t.Run("connection refused", func(t *testing.T) { @@ -103,7 +103,7 @@ func TestDoCmd(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) 
redis.Client { - return redis.NewClientImpl(statsStore, false, "", "single", addr, 1, 0, 0) + return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, 0, 0) } t.Run("SETGET ok", func(t *testing.T) { @@ -148,7 +148,7 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", "single", addr, 1, pipelineWindow, pipelineLimit) + return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, pipelineWindow, pipelineLimit) } t.Run("SETGET ok", func(t *testing.T) { From 4bb32826997cba76f8f3f193b9eef5762de0d175 Mon Sep 17 00:00:00 2001 From: Yuki Sawa Date: Tue, 3 Aug 2021 08:27:50 -0700 Subject: [PATCH 017/181] Remove support for legacy v2 api (#278) Signed-off-by: Yuki Sawa --- README.md | 18 +- go.sum | 10 - src/service/ratelimit.go | 10 - src/service/ratelimit_legacy.go | 135 ------ src/service_cmd/runner/runner.go | 9 +- src/stats/manager.go | 11 - src/stats/manager_impl.go | 9 - test/common/common.go | 18 - test/integration/integration_test.go | 113 ----- .../ratelimit/config/another_legacy.yaml | 11 - .../ratelimit/config/basic_legacy.yaml | 6 - test/mocks/stats/manager.go | 9 - test/service/ratelimit_legacy_test.go | 429 ------------------ 13 files changed, 10 insertions(+), 778 deletions(-) delete mode 100644 src/service/ratelimit_legacy.go delete mode 100644 test/integration/runtime/current/ratelimit/config/another_legacy.yaml delete mode 100644 test/integration/runtime/current/ratelimit/config/basic_legacy.yaml delete mode 100644 test/service/ratelimit_legacy_test.go diff --git a/README.md b/README.md index e1cac35e..b5768a99 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,8 @@ - [Overview](#overview) - [Docker Image](#docker-image) -- [Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto](#deprecation-of-legacy-ratelimit-proto-and-v2-ratelimit-proto) - - [Deprecation Schedule](#deprecation-schedule) +- [Supported Envoy APIs](#supported-envoy-apis) + - [API Deprecation History](#api-deprecation-history) - [Building and Testing](#building-and-testing) - [Docker-compose setup](#docker-compose-setup) - [Full test environment](#full-test-environment) @@ -51,16 +51,12 @@ decision is then returned to the caller. # Docker Image For every main commit, an image is pushed to [Dockerhub](https://hub.docker.com/r/envoyproxy/ratelimit/tags?page=1&ordering=last_updated). There is currently no versioning (post v1.4.0) and tags are based on commit sha. -# Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto +# Supported Envoy APIs -Envoy's data-plane-api defines a ratelimit service proto v3 [rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto). -Logically the data-plane-api rls [v3](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) -is equivalent to the rls [v2](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto). -However, due to the namespace differences and how gRPC routing works it is not possible to transparently route the legacy v2 ratelimit requests to the v3 definitions. -Therefore, the ratelimit service will upgrade the requests, process them internally as it would process a v3 ratelimit request, and then downgrade the response to send back to the client. 
This means that, -for a slight performance hit for clients using the legacy proto, ratelimit is backwards compatible with the legacy proto. +[v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is currently supported. +Support for [v2 rls proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) is now deprecated. -## Deprecation Schedule +## API Deprecation History 1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. Ratelimit has been in production use at Lyft for over 2 years. 2. `v1.1.0` introduces the data-plane-api proto and initiates the deprecation of the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). @@ -68,7 +64,7 @@ for a slight performance hit for clients using the legacy proto, ratelimit is ba The current version of ratelimit protocol is changed to [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is still supported as a legacy protocol. -4. `TODO` deletes support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) +4. `TODO` deleted support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) # Building and Testing diff --git a/go.sum b/go.sum index 071a59b3..ff594f00 100644 --- a/go.sum +++ b/go.sum @@ -36,14 +36,12 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= @@ -76,11 +74,9 @@ github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= 
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= @@ -101,7 +97,6 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -117,11 +112,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86JNJe4spst6Ff8MjvPUdPg= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -131,10 +123,8 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 8ac20019..b8d1c0bd 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -20,7 +20,6 @@ import ( type RateLimitServiceServer interface { pb.RateLimitServiceServer GetCurrentConfig() config.RateLimitConfig - GetLegacyService() RateLimitLegacyServiceServer } type service struct { @@ -31,7 +30,6 @@ type service struct { runtimeUpdateEvent chan int cache limiter.RateLimitCache stats stats.ServiceStats - legacy *legacyService runtimeWatchRoot bool } @@ -185,10 +183,6 @@ func (this *service) ShouldRateLimit( return response, nil } -func (this *service) GetLegacyService() RateLimitLegacyServiceServer { - return this.legacy -} - func (this *service) GetCurrentConfig() config.RateLimitConfig { this.configLock.RLock() defer this.configLock.RUnlock() @@ -208,10 +202,6 @@ func NewService(runtime loader.IFace, cache limiter.RateLimitCache, stats: statsManager.NewServiceStats(), runtimeWatchRoot: runtimeWatchRoot, } - newService.legacy = &legacyService{ - s: newService, - shouldRateLimitLegacyStats: statsManager.NewShouldRateLimitLegacyStats(), - } runtime.AddUpdateCallback(newService.runtimeUpdateEvent) diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go deleted file mode 100644 index ac3971e0..00000000 --- a/src/service/ratelimit_legacy.go +++ /dev/null @@ -1,135 +0,0 @@ -package ratelimit - -import ( - core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" - pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/stats" - "golang.org/x/net/context" -) - -type RateLimitLegacyServiceServer interface { - pb_legacy.RateLimitServiceServer -} - -// legacyService is used to implement v2 rls.proto (https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) -// the legacyService receives RateLimitRequests, converts the request, and calls the service's ShouldRateLimit method. 
-type legacyService struct { - s *service - shouldRateLimitLegacyStats stats.ShouldRateLimitLegacyStats -} - -func (this *legacyService) ShouldRateLimit( - ctx context.Context, - legacyRequest *pb_legacy.RateLimitRequest) (finalResponse *pb_legacy.RateLimitResponse, finalError error) { - - request, err := ConvertLegacyRequest(legacyRequest) - if err != nil { - this.shouldRateLimitLegacyStats.ReqConversionError.Inc() - return nil, err - } - resp, err := this.s.ShouldRateLimit(ctx, request) - if err != nil { - this.shouldRateLimitLegacyStats.ShouldRateLimitError.Inc() - return nil, err - } - - legacyResponse, err := ConvertResponse(resp) - if err != nil { - this.shouldRateLimitLegacyStats.RespConversionError.Inc() - return nil, err - } - - return legacyResponse, nil -} - -func ConvertLegacyRequest(legacyRequest *pb_legacy.RateLimitRequest) (*pb.RateLimitRequest, error) { - if legacyRequest == nil { - return nil, nil - } - request := &pb.RateLimitRequest{ - Domain: legacyRequest.GetDomain(), - HitsAddend: legacyRequest.GetHitsAddend(), - } - if legacyRequest.GetDescriptors() != nil { - descriptors := make([]*pb_struct.RateLimitDescriptor, len(legacyRequest.GetDescriptors())) - for i, descriptor := range legacyRequest.GetDescriptors() { - if descriptor != nil { - descriptors[i] = &pb_struct.RateLimitDescriptor{} - if descriptor.GetEntries() != nil { - entries := make([]*pb_struct.RateLimitDescriptor_Entry, len(descriptor.GetEntries())) - for j, entry := range descriptor.GetEntries() { - if entry != nil { - entries[j] = &pb_struct.RateLimitDescriptor_Entry{ - Key: entry.GetKey(), - Value: entry.GetValue(), - } - } - } - descriptors[i].Entries = entries - } - } - } - request.Descriptors = descriptors - } - return request, nil -} - -func ConvertResponse(response *pb.RateLimitResponse) (*pb_legacy.RateLimitResponse, error) { - if response == nil { - return nil, nil - } - - legacyResponse := &pb_legacy.RateLimitResponse{ - OverallCode: pb_legacy.RateLimitResponse_Code(response.GetOverallCode()), - } - - if response.GetStatuses() != nil { - statuses := make([]*pb_legacy.RateLimitResponse_DescriptorStatus, len(response.GetStatuses())) - for i, status := range response.GetStatuses() { - if status != nil { - statuses[i] = &pb_legacy.RateLimitResponse_DescriptorStatus{ - Code: pb_legacy.RateLimitResponse_Code(status.GetCode()), - LimitRemaining: status.GetLimitRemaining(), - } - if status.GetCurrentLimit() != nil { - statuses[i].CurrentLimit = &pb_legacy.RateLimitResponse_RateLimit{ - Name: status.GetCurrentLimit().GetName(), - RequestsPerUnit: status.GetCurrentLimit().GetRequestsPerUnit(), - Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(status.GetCurrentLimit().GetUnit()), - } - } - } - } - legacyResponse.Statuses = statuses - } - - if response.GetRequestHeadersToAdd() != nil { - requestHeadersToAdd := make([]*core_legacy.HeaderValue, len(response.GetRequestHeadersToAdd())) - for i, header := range response.GetRequestHeadersToAdd() { - if header != nil { - requestHeadersToAdd[i] = &core_legacy.HeaderValue{ - Key: header.GetKey(), - Value: header.GetValue(), - } - } - } - legacyResponse.RequestHeadersToAdd = requestHeadersToAdd - } - - if response.GetResponseHeadersToAdd() != nil { - responseHeadersToAdd := make([]*core_legacy.HeaderValue, len(response.GetResponseHeadersToAdd())) - for i, header := range response.GetResponseHeadersToAdd() { - if header != nil { - responseHeadersToAdd[i] = &core_legacy.HeaderValue{ - Key: header.GetKey(), - Value: header.GetValue(), - } - } - } - 
legacyResponse.Headers = responseHeadersToAdd - } - - return legacyResponse, nil -} diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index c8fb45e3..31865045 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -14,7 +14,6 @@ import ( "github.com/coocood/freecache" - pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" @@ -119,12 +118,10 @@ func (runner *Runner) Run() { srv.AddJsonHandler(service) - // Ratelimit is compatible with two proto definitions - // 1. data-plane-api v3 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto + // Ratelimit is compatible with the below proto definition + // data-plane-api v3 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto + // v2 proto is no longer supported pb.RegisterRateLimitServiceServer(srv.GrpcServer(), service) - // 1. data-plane-api v2 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto - pb_legacy.RegisterRateLimitServiceServer(srv.GrpcServer(), service.GetLegacyService()) - // (1) is the current definition, and (2) is the legacy definition. srv.Start() } diff --git a/src/stats/manager.go b/src/stats/manager.go index a96753b7..6c54e474 100644 --- a/src/stats/manager.go +++ b/src/stats/manager.go @@ -14,9 +14,6 @@ type Manager interface { // Initializes a ServiceStats structure. // Multiple calls to this method are idempotent. NewServiceStats() ServiceStats - // Initializes a ShouldRateLimitLegacyStats structure. - // Multiple calls to this method are idempotent. - NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats // Returns the stats.Store wrapped by the Manager. GetStatsStore() stats.Store } @@ -24,7 +21,6 @@ type Manager interface { type ManagerImpl struct { store gostats.Store rlStatsScope gostats.Scope - legacyStatsScope gostats.Scope serviceStatsScope gostats.Scope shouldRateLimitScope gostats.Scope } @@ -44,13 +40,6 @@ type ServiceStats struct { ShouldRateLimit ShouldRateLimitStats } -// Legacy Stats for ratelimit errors. -type ShouldRateLimitLegacyStats struct { - ReqConversionError gostats.Counter - RespConversionError gostats.Counter - ShouldRateLimitError gostats.Counter -} - // Stats for an individual rate limit config entry. 
type RateLimitStats struct { Key string diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index 48a01b1a..e7b6a0b1 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -11,7 +11,6 @@ func NewStatManager(store gostats.Store, settings settings.Settings) *ManagerImp return &ManagerImpl{ store: store, rlStatsScope: serviceScope.Scope("rate_limit"), - legacyStatsScope: serviceScope.Scope("call.should_rate_limit_legacy"), serviceStatsScope: serviceScope, shouldRateLimitScope: serviceScope.Scope("call.should_rate_limit"), } @@ -36,14 +35,6 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats { return ret } -func (this *ManagerImpl) NewShouldRateLimitLegacyStats() ShouldRateLimitLegacyStats { - return ShouldRateLimitLegacyStats{ - ReqConversionError: this.legacyStatsScope.NewCounter("req_conversion_error"), - RespConversionError: this.legacyStatsScope.NewCounter("resp_conversion_error"), - ShouldRateLimitError: this.legacyStatsScope.NewCounter("should_rate_limit_error"), - } -} - func (this *ManagerImpl) NewShouldRateLimitStats() ShouldRateLimitStats { ret := ShouldRateLimitStats{} ret.RedisError = this.shouldRateLimitScope.NewCounter("redis_error") diff --git a/test/common/common.go b/test/common/common.go index b90c536e..54216f1f 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -15,9 +15,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" - pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" - pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" ) @@ -66,22 +64,6 @@ func NewRateLimitRequest(domain string, descriptors [][][2]string, hitsAddend ui return request } -func NewRateLimitRequestLegacy(domain string, descriptors [][][2]string, hitsAddend uint32) *pb_legacy.RateLimitRequest { - request := &pb_legacy.RateLimitRequest{} - request.Domain = domain - for _, descriptor := range descriptors { - newDescriptor := &pb_struct_legacy.RateLimitDescriptor{} - for _, entry := range descriptor { - newDescriptor.Entries = append( - newDescriptor.Entries, - &pb_struct_legacy.RateLimitDescriptor_Entry{Key: entry[0], Value: entry[1]}) - } - request.Descriptors = append(request.Descriptors, newDescriptor) - } - request.HitsAddend = hitsAddend - return request -} - func AssertProtoEqual(assert *assert.Assertions, expected proto.Message, actual proto.Message) { assert.True(proto.Equal(expected, actual), fmt.Sprintf("These two protobuf messages are not equal:\nexpected: %v\nactual: %v", expected, actual)) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 249d9b2c..784944f6 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -11,7 +11,6 @@ import ( "testing" "time" - pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/src/service_cmd/runner" @@ -64,17 +63,6 @@ func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint3 } } -func newDescriptorStatusLegacy( - status pb_legacy.RateLimitResponse_Code, requestsPerUnit uint32, - unit pb_legacy.RateLimitResponse_RateLimit_Unit, limitRemaining uint32) 
*pb_legacy.RateLimitResponse_DescriptorStatus { - - return &pb_legacy.RateLimitResponse_DescriptorStatus{ - Code: status, - CurrentLimit: &pb_legacy.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, - LimitRemaining: limitRemaining, - } -} - func makeSimpleRedisSettings(redisPort int, perSecondPort int, perSecond bool, localCacheSize int) settings.Settings { s := defaultSettings() @@ -600,107 +588,6 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) { } } -func TestBasicConfigLegacy(t *testing.T) { - common.WithMultiRedis(t, []common.RedisConfig{ - {Port: 6383}, - }, func() { - testBasicConfigLegacy(t) - }) -} - -func testBasicConfigLegacy(t *testing.T) { - s := makeSimpleRedisSettings(6383, 6380, false, 0) - - runner := startTestRunner(t, s) - defer runner.Stop() - - assert := assert.New(t) - conn, err := grpc.Dial("localhost:8083", grpc.WithInsecure()) - - assert.NoError(err) - defer conn.Close() - c := pb_legacy.NewRateLimitServiceClient(conn) - - response, err := c.ShouldRateLimit( - context.Background(), - common.NewRateLimitRequestLegacy("foo", [][][2]string{{{"hello", "world"}}}, 1)) - common.AssertProtoEqual( - assert, - &pb_legacy.RateLimitResponse{ - OverallCode: pb_legacy.RateLimitResponse_OK, - Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, - response) - assert.NoError(err) - - response, err = c.ShouldRateLimit( - context.Background(), - common.NewRateLimitRequestLegacy("basic_legacy", [][][2]string{{{"key1", "foo"}}}, 1)) - common.AssertProtoEqual( - assert, - &pb_legacy.RateLimitResponse{ - OverallCode: pb_legacy.RateLimitResponse_OK, - Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, pb_legacy.RateLimitResponse_RateLimit_SECOND, 49)}}, - response) - assert.NoError(err) - - // Now come up with a random key, and go over limit for a minute limit which should always work. - r := rand.New(rand.NewSource(time.Now().UnixNano())) - randomInt := r.Int() - for i := 0; i < 25; i++ { - response, err = c.ShouldRateLimit( - context.Background(), - common.NewRateLimitRequestLegacy( - "another", [][][2]string{{{"key2", strconv.Itoa(randomInt)}}}, 1)) - - status := pb_legacy.RateLimitResponse_OK - limitRemaining := uint32(20 - (i + 1)) - if i >= 20 { - status = pb_legacy.RateLimitResponse_OVER_LIMIT - limitRemaining = 0 - } - - common.AssertProtoEqual( - assert, - &pb_legacy.RateLimitResponse{ - OverallCode: status, - Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimitResponse_RateLimit_MINUTE, limitRemaining)}}, - response) - assert.NoError(err) - } - - // Limit now against 2 keys in the same domain. 
- randomInt = r.Int() - for i := 0; i < 15; i++ { - response, err = c.ShouldRateLimit( - context.Background(), - common.NewRateLimitRequestLegacy( - "another_legacy", - [][][2]string{ - {{"key2", strconv.Itoa(randomInt)}}, - {{"key3", strconv.Itoa(randomInt)}}}, 1)) - - status := pb_legacy.RateLimitResponse_OK - limitRemaining1 := uint32(20 - (i + 1)) - limitRemaining2 := uint32(10 - (i + 1)) - if i >= 10 { - status = pb_legacy.RateLimitResponse_OVER_LIMIT - limitRemaining2 = 0 - } - - common.AssertProtoEqual( - assert, - &pb_legacy.RateLimitResponse{ - OverallCode: status, - Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimitResponse_RateLimit_MINUTE, limitRemaining1), - newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}}, - response) - assert.NoError(err) - } -} - func startTestRunner(t *testing.T, s settings.Settings) *runner.Runner { t.Helper() runner := runner.NewRunner(s) diff --git a/test/integration/runtime/current/ratelimit/config/another_legacy.yaml b/test/integration/runtime/current/ratelimit/config/another_legacy.yaml deleted file mode 100644 index 377f90bd..00000000 --- a/test/integration/runtime/current/ratelimit/config/another_legacy.yaml +++ /dev/null @@ -1,11 +0,0 @@ -domain: another_legacy -descriptors: - - key: key2 - rate_limit: - unit: minute - requests_per_unit: 20 - - - key: key3 - rate_limit: - unit: hour - requests_per_unit: 10 \ No newline at end of file diff --git a/test/integration/runtime/current/ratelimit/config/basic_legacy.yaml b/test/integration/runtime/current/ratelimit/config/basic_legacy.yaml deleted file mode 100644 index 3135da55..00000000 --- a/test/integration/runtime/current/ratelimit/config/basic_legacy.yaml +++ /dev/null @@ -1,6 +0,0 @@ -domain: basic_legacy -descriptors: - - key: key1 - rate_limit: - unit: second - requests_per_unit: 50 diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index 52282e72..cbf5bbf1 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -30,15 +30,6 @@ func (m *MockStatManager) NewServiceStats() stats.ServiceStats { return ret } -func (m *MockStatManager) NewShouldRateLimitLegacyStats() stats.ShouldRateLimitLegacyStats { - s := m.store.Scope("call.should_rate_limit_legacy") - return stats.ShouldRateLimitLegacyStats{ - ReqConversionError: s.NewCounter("req_conversion_error"), - RespConversionError: s.NewCounter("resp_conversion_error"), - ShouldRateLimitError: s.NewCounter("should_rate_limit_error"), - } -} - func (m *MockStatManager) NewStats(key string) stats.RateLimitStats { ret := stats.RateLimitStats{} logger.Debugf("outputing test gostats %s", key) diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go deleted file mode 100644 index 77fb31f7..00000000 --- a/test/service/ratelimit_legacy_test.go +++ /dev/null @@ -1,429 +0,0 @@ -package ratelimit_test - -import ( - "github.com/envoyproxy/ratelimit/src/stats" - "testing" - - core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" - pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - 
"github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/redis" - "github.com/envoyproxy/ratelimit/src/service" - "github.com/envoyproxy/ratelimit/test/common" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - "golang.org/x/net/context" -) - -func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.RateLimitResponse_RateLimit, error) { - if ratelimit == nil { - return nil, nil - } - - return &pb_legacy.RateLimitResponse_RateLimit{ - Name: ratelimit.GetName(), - RequestsPerUnit: ratelimit.GetRequestsPerUnit(), - Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(ratelimit.GetUnit()), - }, nil -} - -func convertRatelimits(ratelimits []*config.RateLimit) ([]*pb_legacy.RateLimitResponse_RateLimit, error) { - if ratelimits == nil { - return nil, nil - } - - ret := make([]*pb_legacy.RateLimitResponse_RateLimit, 0) - for _, rl := range ratelimits { - if rl == nil { - ret = append(ret, nil) - continue - } - legacyRl, err := convertRatelimit(rl.Limit) - if err != nil { - return nil, err - } - ret = append(ret, legacyRl) - } - - return ret, nil -} - -func TestServiceLegacy(test *testing.T) { - t := commonSetup(test) - defer t.controller.Finish() - service := t.setupBasicService() - - // First request, config should be loaded. - legacyRequest := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1) - req, err := ratelimit.ConvertLegacyRequest(legacyRequest) - if err != nil { - t.assert.FailNow(err.Error()) - } - t.config.EXPECT().GetLimit(nil, "test-domain", req.Descriptors[0]).Return(nil) - t.cache.EXPECT().DoLimit(nil, req, []*config.RateLimit{nil}).Return( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) - - response, err := service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) - common.AssertProtoEqual( - t.assert, - &pb_legacy.RateLimitResponse{ - OverallCode: pb_legacy.RateLimitResponse_OK, - Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, - response) - t.assert.Nil(err) - - // Force a config reload. - barrier := newBarrier() - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) - t.runtimeUpdateCallback <- 1 - barrier.wait() - - // Different request. 
- legacyRequest = common.NewRateLimitRequestLegacy( - "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) - req, err = ratelimit.ConvertLegacyRequest(legacyRequest) - if err != nil { - t.assert.FailNow(err.Error()) - } - - limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false), - nil} - legacyLimits, err := convertRatelimits(limits) - if err != nil { - t.assert.FailNow(err.Error()) - } - - t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[1]).Return(limits[1]) - t.cache.EXPECT().DoLimit(nil, req, limits).Return( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) - response, err = service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) - common.AssertProtoEqual( - t.assert, - &pb_legacy.RateLimitResponse{ - OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, - Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - {Code: pb_legacy.RateLimitResponse_OVER_LIMIT, CurrentLimit: legacyLimits[0], LimitRemaining: 0}, - {Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - }}, - response) - t.assert.Nil(err) - - // Config load failure. - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager) { - defer barrier.signal() - panic(config.RateLimitConfigError("load error")) - }) - t.runtimeUpdateCallback <- 1 - barrier.wait() - - // Config should still be valid. Also make sure order does not affect results. 
- limits = []*config.RateLimit{ - nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} - legacyLimits, err = convertRatelimits(limits) - if err != nil { - t.assert.FailNow(err.Error()) - } - - t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[1]).Return(limits[1]) - t.cache.EXPECT().DoLimit(nil, req, limits).Return( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}}) - response, err = service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) - common.AssertProtoEqual( - t.assert, - &pb_legacy.RateLimitResponse{ - OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, - Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - {Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb_legacy.RateLimitResponse_OVER_LIMIT, CurrentLimit: legacyLimits[1], LimitRemaining: 0}, - }}, - response) - t.assert.Nil(err) - - t.assert.EqualValues(2, t.statStore.NewCounter("config_load_success").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("config_load_error").Value()) -} - -func TestEmptyDomainLegacy(test *testing.T) { - t := commonSetup(test) - defer t.controller.Finish() - service := t.setupBasicService() - - request := common.NewRateLimitRequestLegacy("", [][][2]string{{{"hello", "world"}}}, 1) - response, err := service.GetLegacyService().ShouldRateLimit(nil, request) - t.assert.Nil(response) - t.assert.Equal("rate limit domain must not be empty", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) -} - -func TestEmptyDescriptorsLegacy(test *testing.T) { - t := commonSetup(test) - defer t.controller.Finish() - service := t.setupBasicService() - - request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{}, 1) - response, err := service.GetLegacyService().ShouldRateLimit(nil, request) - t.assert.Nil(response) - t.assert.Equal("rate limit descriptor list must not be empty", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) -} - -func TestCacheErrorLegacy(test *testing.T) { - t := commonSetup(test) - defer t.controller.Finish() - service := t.setupBasicService() - - legacyRequest := common.NewRateLimitRequestLegacy("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) - req, err := ratelimit.ConvertLegacyRequest(legacyRequest) - if err != nil { - t.assert.FailNow(err.Error()) - } - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} - t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) - t.cache.EXPECT().DoLimit(nil, req, limits).Do( - func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { - panic(redis.RedisError("cache error")) - }) - - response, err := service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) - t.assert.Nil(response) - t.assert.Equal("cache error", err.Error()) - t.assert.EqualValues(1, 
t.statStore.NewCounter("call.should_rate_limit.redis_error").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) -} - -func TestInitialLoadErrorLegacy(test *testing.T) { - t := commonSetup(test) - defer t.controller.Finish() - - t.runtime.EXPECT().AddUpdateCallback(gomock.Any()).Do( - func(callback chan<- int) { t.runtimeUpdateCallback = callback }) - t.runtime.EXPECT().Snapshot().Return(t.snapshot).MinTimes(1) - t.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1) - t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager) { - panic(config.RateLimitConfigError("load error")) - }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true) - - request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1) - response, err := service.GetLegacyService().ShouldRateLimit(nil, request) - t.assert.Nil(response) - t.assert.Equal("no rate limit configuration loaded", err.Error()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) - t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit_legacy.should_rate_limit_error").Value()) - -} - -func TestConvertLegacyRequest(test *testing.T) { - req, err := ratelimit.ConvertLegacyRequest(nil) - if err != nil { - assert.FailNow(test, err.Error()) - } - assert.Nil(test, req) - - { - request := &pb_legacy.RateLimitRequest{ - Domain: "test", - Descriptors: nil, - HitsAddend: 10, - } - - expectedRequest := &pb.RateLimitRequest{ - Domain: "test", - Descriptors: nil, - HitsAddend: 10, - } - - req, err := ratelimit.ConvertLegacyRequest(request) - if err != nil { - assert.FailNow(test, err.Error()) - } - - common.AssertProtoEqual(assert.New(test), expectedRequest, req) - } - - { - request := &pb_legacy.RateLimitRequest{ - Domain: "test", - Descriptors: []*pb_struct_legacy.RateLimitDescriptor{}, - HitsAddend: 10, - } - - expectedRequest := &pb.RateLimitRequest{ - Domain: "test", - Descriptors: []*pb_struct.RateLimitDescriptor{}, - HitsAddend: 10, - } - - req, err := ratelimit.ConvertLegacyRequest(request) - if err != nil { - assert.FailNow(test, err.Error()) - } - - common.AssertProtoEqual(assert.New(test), expectedRequest, req) - } - - { - descriptors := []*pb_struct_legacy.RateLimitDescriptor{ - { - Entries: []*pb_struct_legacy.RateLimitDescriptor_Entry{ - { - Key: "foo", - Value: "foo_value", - }, - nil, - }, - }, - { - Entries: []*pb_struct_legacy.RateLimitDescriptor_Entry{}, - }, - { - Entries: nil, - }, - nil, - } - - request := &pb_legacy.RateLimitRequest{ - Domain: "test", - Descriptors: descriptors, - HitsAddend: 10, - } - - expectedDescriptors := []*pb_struct.RateLimitDescriptor{ - { - Entries: []*pb_struct.RateLimitDescriptor_Entry{ - { - Key: "foo", - Value: "foo_value", - }, - nil, - }, - }, - { - Entries: []*pb_struct.RateLimitDescriptor_Entry{}, - }, - { - Entries: nil, - }, - nil, - } - - expectedRequest := &pb.RateLimitRequest{ - Domain: "test", - Descriptors: expectedDescriptors, - HitsAddend: 10, - } - - req, err := ratelimit.ConvertLegacyRequest(request) - if err != nil { - assert.FailNow(test, err.Error()) - } - - common.AssertProtoEqual(assert.New(test), expectedRequest, req) - } -} - -func TestConvertResponse(test 
*testing.T) { - resp, err := ratelimit.ConvertResponse(nil) - if err != nil { - assert.FailNow(test, err.Error()) - } - assert.Nil(test, resp) - - rl := &pb.RateLimitResponse_RateLimit{ - RequestsPerUnit: 10, - Unit: pb.RateLimitResponse_RateLimit_DAY, - } - - statuses := []*pb.RateLimitResponse_DescriptorStatus{ - { - Code: pb.RateLimitResponse_OK, - CurrentLimit: nil, - LimitRemaining: 9, - }, - nil, - { - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: rl, - LimitRemaining: 0, - }, - } - - requestHeadersToAdd := []*core.HeaderValue{{ - Key: "test_request", - Value: "test_request_value", - }, nil} - - responseHeadersToAdd := []*core.HeaderValue{{ - Key: "test_response", - Value: "test_response", - }, nil} - - response := &pb.RateLimitResponse{ - OverallCode: pb.RateLimitResponse_OVER_LIMIT, - Statuses: statuses, - RequestHeadersToAdd: requestHeadersToAdd, - ResponseHeadersToAdd: responseHeadersToAdd, - } - - expectedRl := &pb_legacy.RateLimitResponse_RateLimit{ - RequestsPerUnit: 10, - Unit: pb_legacy.RateLimitResponse_RateLimit_DAY, - } - - expectedStatuses := []*pb_legacy.RateLimitResponse_DescriptorStatus{ - { - Code: pb_legacy.RateLimitResponse_OK, - CurrentLimit: nil, - LimitRemaining: 9, - }, - nil, - { - Code: pb_legacy.RateLimitResponse_OVER_LIMIT, - CurrentLimit: expectedRl, - LimitRemaining: 0, - }, - } - - expectedRequestHeadersToAdd := []*core_legacy.HeaderValue{{ - Key: "test_request", - Value: "test_request_value", - }, nil} - - expecpectedResponseHeadersToAdd := []*core_legacy.HeaderValue{{ - Key: "test_response", - Value: "test_response", - }, nil} - - expectedResponse := &pb_legacy.RateLimitResponse{ - OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, - Statuses: expectedStatuses, - RequestHeadersToAdd: expectedRequestHeadersToAdd, - Headers: expecpectedResponseHeadersToAdd, - } - - resp, err = ratelimit.ConvertResponse(response) - if err != nil { - assert.FailNow(test, err.Error()) - } - - common.AssertProtoEqual(assert.New(test), expectedResponse, resp) -} From d3311816898d03fdad2e54bc05bbca992c0b15eb Mon Sep 17 00:00:00 2001 From: Yuki Sawa Date: Wed, 4 Aug 2021 08:05:17 -0700 Subject: [PATCH 018/181] add sha for docker image with legacy v2 api removed (#279) Signed-off-by: Yuki Sawa --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b5768a99..15a8115d 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ Support for [v2 rls proto](https://github.com/envoyproxy/data-plane-api/blob/mas The current version of ratelimit protocol is changed to [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is still supported as a legacy protocol. -4. `TODO` deleted support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) +4. 
`4bb32826` deleted support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) # Building and Testing From b42701cb8a3a2b35af81a23cefd8a4a05b717b22 Mon Sep 17 00:00:00 2001 From: Yuki Sawa Date: Thu, 12 Aug 2021 08:18:03 -0700 Subject: [PATCH 019/181] don't drop non-specified metrics in docker example (#283) Signed-off-by: Yuki Sawa --- examples/prom-statsd-exporter/conf.yaml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/examples/prom-statsd-exporter/conf.yaml b/examples/prom-statsd-exporter/conf.yaml index d7dfbdb3..4ce3f64a 100644 --- a/examples/prom-statsd-exporter/conf.yaml +++ b/examples/prom-statsd-exporter/conf.yaml @@ -85,10 +85,9 @@ mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action. - match: "ratelimit.service.config_load_error" name: "ratelimit_service_config_load_error" match_metric_type: counter - - match: "ratelimit.service.config_load_error" - name: "ratelimit_service_config_load_error" - match_metric_type: counter - - match: "." - match_type: "regex" - action: "drop" - name: "dropped" + + # Enable below in production once you have the metrics you need + # - match: "." + # match_type: "regex" + # action: "drop" + # name: "dropped" From 38bc1460a08c5be6e0911a7ea813e46f815a4566 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 23 Aug 2021 14:47:57 -0600 Subject: [PATCH 020/181] owners: add Yuki (#284) Signed-off-by: Matt Klein --- OWNERS.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 OWNERS.md diff --git a/OWNERS.md b/OWNERS.md new file mode 100644 index 00000000..ac3a5660 --- /dev/null +++ b/OWNERS.md @@ -0,0 +1,2 @@ +* Matt Klein ([mattklein123](https://github.com/mattklein123)) (mklein@lyft.com) +* Yuki Sawa ([ysawa0](https://github.com/ysawa0)) (yukisawa@gmail.com) From 9c22ef414e9a2724822112c3b73d52e76b0a788e Mon Sep 17 00:00:00 2001 From: Javier Ruiz Date: Thu, 9 Sep 2021 13:24:02 -0400 Subject: [PATCH 021/181] Configurable Redis TLS Config through settings (#289) Signed-off-by: Javier Ruiz Arduengo --- src/redis/cache_impl.go | 4 ++-- src/redis/driver_impl.go | 8 ++++++-- src/settings/settings.go | 4 ++++ test/integration/integration_test.go | 1 + test/redis/bench_test.go | 2 +- test/redis/driver_impl_test.go | 6 +++--- 6 files changed, 17 insertions(+), 8 deletions(-) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 22679149..9bec14bb 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -15,11 +15,11 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, s.RedisPerSecondSocketType, - s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit) + s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit, s.RedisTlsConfig) } var otherPool Client otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisSocketType, s.RedisType, s.RedisUrl, s.RedisPoolSize, - s.RedisPipelineWindow, s.RedisPipelineLimit) + s.RedisPipelineWindow, s.RedisPipelineLimit, s.RedisTlsConfig) return NewFixedRateLimitCacheImpl( otherPool, diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 302dd522..18a65df1 100644 --- 
a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -53,14 +53,18 @@ func checkError(err error) { } func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisType, url string, poolSize int, - pipelineWindow time.Duration, pipelineLimit int) Client { + pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config) Client { logger.Warnf("connecting to redis on %s with pool size %d", url, poolSize) df := func(network, addr string) (radix.Conn, error) { var dialOpts []radix.DialOpt if useTls { - dialOpts = append(dialOpts, radix.DialUseTLS(&tls.Config{})) + if tlsConfig != nil { + dialOpts = append(dialOpts, radix.DialUseTLS(tlsConfig)) + } else { + dialOpts = append(dialOpts, radix.DialUseTLS(&tls.Config{})) + } } if auth != "" { diff --git a/src/settings/settings.go b/src/settings/settings.go index 2646b5b2..4c5c1131 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -1,6 +1,7 @@ package settings import ( + "crypto/tls" "time" "github.com/kelseyhightower/envconfig" @@ -48,6 +49,9 @@ type Settings struct { RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` RedisAuth string `envconfig:"REDIS_AUTH" default:""` RedisTls bool `envconfig:"REDIS_TLS" default:"false"` + // TODO: Make this setting configurable out of the box instead of having to provide it through code. + RedisTlsConfig *tls.Config + // RedisPipelineWindow sets the duration after which internal pipelines will be flushed. // If window is zero then implicit pipelining will be disabled. Radix use 150us for the // default value, see https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L278. diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 784944f6..2699b405 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -223,6 +223,7 @@ func TestMultiNodeMemcache(t *testing.T) { func testBasicConfigAuthTLS(perSecond bool, local_cache_size int) func(*testing.T) { s := makeSimpleRedisSettings(16381, 16382, perSecond, local_cache_size) + s.RedisTlsConfig = nil s.RedisAuth = "password123" s.RedisTls = true s.RedisPerSecondAuth = "password123" diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 1e3ddd72..bfb003ba 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -44,7 +44,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { return func(b *testing.B) { statsStore := gostats.NewStore(gostats.NewNullSink(), false) sm := stats.NewMockStatManager(statsStore) - client := redis.NewClientImpl(statsStore, false, "", "tcp", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) + client := redis.NewClientImpl(statsStore, false, "", "tcp", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit, nil) defer client.Close() cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm) diff --git a/test/redis/driver_impl_test.go b/test/redis/driver_impl_test.go index 0653cc4c..b4858da1 100644 --- a/test/redis/driver_impl_test.go +++ b/test/redis/driver_impl_test.go @@ -36,7 +36,7 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(auth, addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, auth, "tcp", "single", addr, 1, pipelineWindow, pipelineLimit) + return redis.NewClientImpl(statsStore, false, auth, "tcp", 
"single", addr, 1, pipelineWindow, pipelineLimit, nil) } t.Run("connection refused", func(t *testing.T) { @@ -103,7 +103,7 @@ func TestDoCmd(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, 0, 0) + return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, 0, 0, nil) } t.Run("SETGET ok", func(t *testing.T) { @@ -148,7 +148,7 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, pipelineWindow, pipelineLimit) + return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, pipelineWindow, pipelineLimit, nil) } t.Run("SETGET ok", func(t *testing.T) { From 568c537dca0f9630bc21530d2008edf449c6beeb Mon Sep 17 00:00:00 2001 From: lmajercak-wish Date: Thu, 9 Sep 2021 20:23:33 -0700 Subject: [PATCH 022/181] Add configurable GrpcMaxConnectionAge, GrpcMaxConnectionAgeGrace (#288) * Add configurable GrpcMaxConnectionAge, GrpcMaxConnectionAgeGrace Signed-off-by: lmajercak-wish * Add configurable GrpcMaxConnectionAge, GrpcMaxConnectionAgeGrace Signed-off-by: lmajercak-wish * Fix fmt Signed-off-by: lmajercak-wish * Add to README Signed-off-by: lmajercak-wish * Fix README format Signed-off-by: lmajercak-wish --- README.md | 7 +++++++ src/server/server_impl.go | 9 ++++++++- src/settings/settings.go | 7 +++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 15a8115d..adba6164 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ - [Example 5](#example-5) - [Loading Configuration](#loading-configuration) - [Log Format](#log-format) + - [GRPC Keepalive](#grpc-keepalive) - [Request Fields](#request-fields) - [GRPC Client](#grpc-client) - [Commandline flags](#commandline-flags) @@ -407,6 +408,12 @@ Output example: {"@message":"waiting for runtime update","@timestamp":"2020-09-10T17:22:44.926267808Z","level":"debug"} ``` +## GRPC Keepalive +Client-side GRPC DNS re-resolution in scenarios with auto scaling enabled might not work as expected and the current workaround is to [configure connection keepalive](https://github.com/grpc/grpc/issues/12295#issuecomment-382794204) on server-side. +The behavior can be fixed by configuring the following env variables for the ratelimit server: +* `GRPC_MAX_CONNECTION_AGE`: a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. +* `GRPC_MAX_CONNECTION_AGE_GRACE`: an additive period after MaxConnectionAge after which the connection will be forcibly closed. 
+ # Request Fields For information on the fields of a Ratelimit gRPC request please read the information diff --git a/src/server/server_impl.go b/src/server/server_impl.go index f7e329d2..c3f3dbb7 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -4,6 +4,7 @@ import ( "bytes" "expvar" "fmt" + "google.golang.org/grpc/keepalive" "io" "net/http" "net/http/pprof" @@ -178,7 +179,13 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc } ret := new(server) - ret.grpcServer = grpc.NewServer(s.GrpcUnaryInterceptor) + + keepaliveOpt := grpc.KeepaliveParams(keepalive.ServerParameters{ + MaxConnectionAge: s.GrpcMaxConnectionAge, + MaxConnectionAgeGrace: s.GrpcMaxConnectionAgeGrace, + }) + + ret.grpcServer = grpc.NewServer(s.GrpcUnaryInterceptor, keepaliveOpt) // setup listen addresses ret.httpAddress = net.JoinHostPort(s.Host, strconv.Itoa(s.Port)) diff --git a/src/settings/settings.go b/src/settings/settings.go index 4c5c1131..7cd232c9 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -19,6 +19,13 @@ type Settings struct { DebugHost string `envconfig:"DEBUG_HOST" default:"0.0.0.0"` DebugPort int `envconfig:"DEBUG_PORT" default:"6070"` + // GRPC server settings + // GrpcMaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. + // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. + GrpcMaxConnectionAge time.Duration `envconfig:"GRPC_MAX_CONNECTION_AGE" default:"24h" description:"Duration a connection may exist before it will be closed by sending a GoAway."` + // GrpcMaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed. + GrpcMaxConnectionAgeGrace time.Duration `envconfig:"GRPC_MAX_CONNECTION_AGE_GRACE" default:"1h" description:"Period after MaxConnectionAge after which the connection will be forcibly closed."` + // Logging settings LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` LogFormat string `envconfig:"LOG_FORMAT" default:"text"` From ced4263e17d08d23296e34c1ab9f939b7d466c15 Mon Sep 17 00:00:00 2001 From: petedmarsh Date: Tue, 28 Sep 2021 17:05:15 +0200 Subject: [PATCH 023/181] Fix MEMCACHED_SRV support (#295) This did not work in practice due to a mix-up between func example(serverList memcache.ServerList) and func example(serverList *memcache.ServerList). The code used the first form and did not correctly update the given memcache.ServerList instance; instead, a new one was created, updated, and then forgotten.
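The pitfall the message describes is a standard Go one: `(*memcache.ServerList).SetServers` mutates its receiver, so a `ServerList` passed by value is copied and the caller never observes the update (copying the struct also copies its embedded mutex, which `go vet` flags). A minimal sketch under that assumption, separate from the patched code itself:

```go
package main

import (
	"fmt"

	"github.com/bradfitz/gomemcache/memcache"
)

// broken receives a copy; SetServers updates the copy, which is thrown
// away when the function returns.
func broken(sl memcache.ServerList) {
	_ = sl.SetServers("10.0.0.1:11211")
}

// fixed receives a pointer, so SetServers updates the caller's list.
func fixed(sl *memcache.ServerList) {
	_ = sl.SetServers("10.0.0.1:11211")
}

func main() {
	sl := new(memcache.ServerList)
	broken(*sl) // sl still has no servers
	fixed(sl)   // sl now resolves to 10.0.0.1:11211
	fmt.Println("refreshed")
}
```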
Signed-off-by: Peter Marsh --- src/memcached/cache_impl.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 4b21af33..72bc216f 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -176,7 +176,7 @@ func (this *rateLimitMemcacheImpl) Flush() { this.waitGroup.Wait() } -func refreshServersPeriodically(serverList memcache.ServerList, srv string, d time.Duration, finish <-chan struct{}) { +func refreshServersPeriodically(serverList *memcache.ServerList, srv string, d time.Duration, finish <-chan struct{}) { t := time.NewTicker(d) defer t.Stop() for { @@ -194,7 +194,7 @@ func refreshServersPeriodically(serverList memcache.ServerList, srv string, d ti } } -func refreshServers(serverList memcache.ServerList, srv_ string) error { +func refreshServers(serverList *memcache.ServerList, srv_ string) error { servers, err := srv.ServerStringsFromSrv(srv_) if err != nil { return err @@ -208,7 +208,7 @@ func refreshServers(serverList memcache.ServerList, srv_ string) error { func newMemcachedFromSrv(srv_ string, d time.Duration) Client { serverList := new(memcache.ServerList) - err := refreshServers(*serverList, srv_) + err := refreshServers(serverList, srv_) if err != nil { errorText := "Unable to fetch servers from SRV" logger.Errorf(errorText) @@ -218,7 +218,7 @@ func newMemcachedFromSrv(srv_ string, d time.Duration) Client { if d > 0 { logger.Infof("refreshing memcache hosts every: %v milliseconds", d.Milliseconds()) finish := make(chan struct{}) - go refreshServersPeriodically(*serverList, srv_, d, finish) + go refreshServersPeriodically(serverList, srv_, d, finish) } else { logger.Debugf("not periodically refreshing memcached hosts") } From 35b6056d93c4ab5b0eb5d3e04deb6b4df2c6b261 Mon Sep 17 00:00:00 2001 From: jespersoderlund Date: Wed, 29 Sep 2021 23:40:54 +0200 Subject: [PATCH 024/181] =?UTF-8?q?Addressing=20issue=20#291=20-=20Adding?= =?UTF-8?q?=20custom=20headers=20with=20the=20ratelimit=20trig=E2=80=A6=20?= =?UTF-8?q?(#292)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Addressing issue #291 - Adding custom headers with the ratelimit triggered Signed-off-by: Jesper Söderlund * Fixing doc format Signed-off-by: Jesper Söderlund * Fixing data race condition during config-reload Signed-off-by: Jesper Söderlund * Review comments Signed-off-by: Jesper Söderlund * Review comments Signed-off-by: Jesper Söderlund * Changed settings approach. Refactored custom clock to use already existing TimeSource Signed-off-by: Jesper Söderlund * Cleanup after timesource refactoring Signed-off-by: Jesper Söderlund * Fixed review comments Signed-off-by: Jesper Söderlund Co-authored-by: Jesper Söderlund --- README.md | 10 +++ src/limiter/base_limiter.go | 7 +- src/service/ratelimit.go | 98 +++++++++++++++++++--- src/service_cmd/runner/runner.go | 6 +- src/settings/settings.go | 9 +++ src/utils/utilities.go | 4 +- test/memcached/cache_impl_test.go | 47 +++++------ test/redis/fixed_cache_impl_test.go | 39 ++++----- test/service/ratelimit_test.go | 121 +++++++++++++++++++++++++++- 9 files changed, 279 insertions(+), 62 deletions(-) diff --git a/README.md b/README.md index adba6164..40691ab6 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,7 @@ - [One Redis Instance](#one-redis-instance) - [Two Redis Instances](#two-redis-instances) - [Memcache](#memcache) +- [Custom headers](#custom-headers) - [Contact](#contact) @@ -625,6 +626,15 @@ descriptors will fail. 
Descriptors sent to Memcache should not contain whitespac When using multiple memcache nodes in `MEMCACHE_HOST_PORT=`, one should provide the identical list of memcache nodes to all ratelimiter instances to ensure that a particular cache key is always hashed to the same memcache node. +# Custom headers +Ratelimit service can be configured to return custom headers with the ratelimit information. It will populate the response_headers_to_add as part of the [RateLimitResponse](https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/ratelimit/v3/rls.proto#service-ratelimit-v3-ratelimitresponse). + +The following environment variables control the custom response feature: +1. `LIMIT_RESPONSE_HEADERS_ENABLED` - Enables the custom response headers +1. `LIMIT_LIMIT_HEADER` - The default value is "RateLimit-Limit", setting the environment variable will specify an alternative header name +1. `LIMIT_REMAINING_HEADER` - The default value is "RateLimit-Remaining", setting the environment variable will specify an alternative header name +1. `LIMIT_RESET_HEADER` - The default value is "RateLimit-Reset", setting the environment variable will specify an alternative header name + # Contact * [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 346ff871..6a9ee927 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -1,6 +1,9 @@ package limiter import ( + "math" + "math/rand" + "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/assert" @@ -8,8 +11,6 @@ import ( "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/utils" logger "github.com/sirupsen/logrus" - "math" - "math/rand" ) type BaseRateLimiter struct { @@ -168,7 +169,7 @@ func (this *BaseRateLimiter) generateResponseDescriptorStatus(responseCode pb.Ra Code: responseCode, CurrentLimit: limit, LimitRemaining: limitRemaining, - DurationUntilReset: utils.CalculateReset(limit, this.timeSource), + DurationUntilReset: utils.CalculateReset(&limit.Unit, this.timeSource), } } else { return &pb.RateLimitResponse_DescriptorStatus{ diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index b8d1c0bd..b4c2f15e 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -2,11 +2,16 @@ package ratelimit import ( "fmt" - "github.com/envoyproxy/ratelimit/src/stats" "math" + "strconv" "strings" "sync" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/utils" + + core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" @@ -23,14 +28,19 @@ type RateLimitServiceServer interface { } type service struct { - runtime loader.IFace - configLock sync.RWMutex - configLoader config.RateLimitConfigLoader - config config.RateLimitConfig - runtimeUpdateEvent chan int - cache limiter.RateLimitCache - stats stats.ServiceStats - runtimeWatchRoot bool + runtime loader.IFace + configLock sync.RWMutex + configLoader config.RateLimitConfigLoader + config config.RateLimitConfig + runtimeUpdateEvent chan int + cache limiter.RateLimitCache + stats stats.ServiceStats + runtimeWatchRoot bool + customHeadersEnabled bool + customHeaderLimitHeader string + 
customHeaderRemainingHeader string + customHeaderResetHeader string + customHeaderClock utils.TimeSource } func (this *service) reloadConfig(statsManager stats.Manager) { @@ -58,8 +68,20 @@ func (this *service) reloadConfig(statsManager stats.Manager) { newConfig := this.configLoader.Load(files, statsManager) this.stats.ConfigLoadSuccess.Inc() + this.configLock.Lock() this.config = newConfig + rlSettings := settings.NewSettings() + + if rlSettings.RateLimitResponseHeadersEnabled { + this.customHeadersEnabled = true + + this.customHeaderLimitHeader = rlSettings.HeaderRatelimitLimit + + this.customHeaderRemainingHeader = rlSettings.HeaderRatelimitRemaining + + this.customHeaderResetHeader = rlSettings.HeaderRatelimitReset + } this.configLock.Unlock() } @@ -118,6 +140,8 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co return limitsToCheck, isUnlimited } +const MaxUint32 = uint32(1<<32 - 1) + func (this *service) shouldRateLimitWorker( ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse { @@ -132,7 +156,20 @@ func (this *service) shouldRateLimitWorker( response := &pb.RateLimitResponse{} response.Statuses = make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) finalCode := pb.RateLimitResponse_OK + + // Keep track of the descriptor which is closest to hit the ratelimit + minLimitRemaining := MaxUint32 + var minimumDescriptor *pb.RateLimitResponse_DescriptorStatus = nil + for i, descriptorStatus := range responseDescriptorStatuses { + // Keep track of the descriptor closest to hit the ratelimit + if this.customHeadersEnabled && + descriptorStatus.CurrentLimit != nil && + descriptorStatus.LimitRemaining < minLimitRemaining { + minimumDescriptor = descriptorStatus + minLimitRemaining = descriptorStatus.LimitRemaining + } + if isUnlimited[i] { response.Statuses[i] = &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -142,14 +179,54 @@ func (this *service) shouldRateLimitWorker( response.Statuses[i] = descriptorStatus if descriptorStatus.Code == pb.RateLimitResponse_OVER_LIMIT { finalCode = descriptorStatus.Code + + minimumDescriptor = descriptorStatus + minLimitRemaining = 0 } } } + // Add Headers if requested + if this.customHeadersEnabled && minimumDescriptor != nil { + response.ResponseHeadersToAdd = []*core.HeaderValue{ + this.rateLimitLimitHeader(minimumDescriptor), + this.rateLimitRemainingHeader(minimumDescriptor), + this.rateLimitResetHeader(minimumDescriptor), + } + } + response.OverallCode = finalCode return response } +func (this *service) rateLimitLimitHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { + + // Limit header only provides the mandatory part from the spec, the actual limit + // the optional quota policy is currently not provided + return &core.HeaderValue{ + Key: this.customHeaderLimitHeader, + Value: strconv.FormatUint(uint64(descriptor.CurrentLimit.RequestsPerUnit), 10), + } +} + +func (this *service) rateLimitRemainingHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { + + // How much of the limit is remaining + return &core.HeaderValue{ + Key: this.customHeaderRemainingHeader, + Value: strconv.FormatUint(uint64(descriptor.LimitRemaining), 10), + } +} + +func (this *service) rateLimitResetHeader( + descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { + + return &core.HeaderValue{ + Key: this.customHeaderResetHeader, + Value: strconv.FormatInt(utils.CalculateReset(&descriptor.CurrentLimit.Unit, 
this.customHeaderClock).GetSeconds(), 10), + } +} + func (this *service) ShouldRateLimit( ctx context.Context, request *pb.RateLimitRequest) (finalResponse *pb.RateLimitResponse, finalError error) { @@ -190,7 +267,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { } func NewService(runtime loader.IFace, cache limiter.RateLimitCache, - configLoader config.RateLimitConfigLoader, statsManager stats.Manager, runtimeWatchRoot bool) RateLimitServiceServer { + configLoader config.RateLimitConfigLoader, statsManager stats.Manager, runtimeWatchRoot bool, clock utils.TimeSource) RateLimitServiceServer { newService := &service{ runtime: runtime, @@ -201,6 +278,7 @@ func NewService(runtime loader.IFace, cache limiter.RateLimitCache, cache: cache, stats: statsManager.NewServiceStats(), runtimeWatchRoot: runtimeWatchRoot, + customHeaderClock: clock, } runtime.AddUpdateCallback(newService.runtimeUpdateEvent) diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 31865045..d88d34ca 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -1,8 +1,6 @@ package runner import ( - "github.com/envoyproxy/ratelimit/src/metrics" - "github.com/envoyproxy/ratelimit/src/stats" "io" "math/rand" "net/http" @@ -10,6 +8,9 @@ import ( "sync" "time" + "github.com/envoyproxy/ratelimit/src/metrics" + "github.com/envoyproxy/ratelimit/src/stats" + gostats "github.com/lyft/gostats" "github.com/coocood/freecache" @@ -107,6 +108,7 @@ func (runner *Runner) Run() { config.NewRateLimitConfigLoaderImpl(), runner.statsManager, s.RuntimeWatchRoot, + utils.NewTimeSourceImpl(), ) srv.AddDebugHttpEndpoint( diff --git a/src/settings/settings.go b/src/settings/settings.go index 7cd232c9..7f0425db 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -49,6 +49,15 @@ type Settings struct { CacheKeyPrefix string `envconfig:"CACHE_KEY_PREFIX" default:""` BackendType string `envconfig:"BACKEND_TYPE" default:"redis"` + // Settings for optional returning of custom headers + RateLimitResponseHeadersEnabled bool `envconfig:"LIMIT_RESPONSE_HEADERS_ENABLED" default:"false"` + // value: the current limit + HeaderRatelimitLimit string `envconfig:"LIMIT_LIMIT_HEADER" default:"RateLimit-Limit"` + // value: remaining count + HeaderRatelimitRemaining string `envconfig:"LIMIT_REMAINING_HEADER" default:"RateLimit-Remaining"` + // value: remaining seconds + HeaderRatelimitReset string `envconfig:"LIMIT_RESET_HEADER" default:"RateLimit-Reset"` + // Redis settings RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` RedisType string `envconfig:"REDIS_TYPE" default:"SINGLE"` diff --git a/src/utils/utilities.go b/src/utils/utilities.go index e6029f5b..c8001b03 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -29,8 +29,8 @@ func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { panic("should not get here") } -func CalculateReset(currentLimit *pb.RateLimitResponse_RateLimit, timeSource TimeSource) *duration.Duration { - sec := UnitToDivider(currentLimit.Unit) +func CalculateReset(unit *pb.RateLimitResponse_RateLimit_Unit, timeSource TimeSource) *duration.Duration { + sec := UnitToDivider(*unit) now := timeSource.UnixNow() return &duration.Duration{Seconds: sec - now%sec} } diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index 5eef2c39..2b69ecfe 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -5,11 +5,12 @@ package memcached_test import ( - 
mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" "math/rand" "strconv" "testing" + mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" + "github.com/bradfitz/gomemcache/memcache" "github.com/coocood/freecache" @@ -49,7 +50,7 @@ func TestMemcached(t *testing.T) { limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -73,7 +74,7 @@ func TestMemcached(t *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) @@ -104,8 +105,8 @@ func TestMemcached(t *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -140,7 +141,7 @@ func TestMemcachedGetError(t *testing.T) { limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), 
limits[0].Stats.OverLimit.Value()) @@ -158,7 +159,7 @@ func TestMemcachedGetError(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -232,7 +233,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -252,7 +253,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -272,7 +273,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -289,7 +290,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { client.EXPECT().Increment("domain_key4_value4_997200", uint64(1)).Times(0) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -328,7 +329,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: 
utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -344,7 +345,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -361,7 +362,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -380,7 +381,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -398,7 +399,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -416,7 +417,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: 
limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -434,7 +435,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -452,7 +453,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(7), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -470,7 +471,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(3), limits[0].Stats.OverLimit.Value()) @@ -514,7 +515,7 @@ func TestMemcacheWithJitter(t *testing.T) { limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -557,7 +558,7 @@ func TestMemcacheAdd(t *testing.T) { limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( - 
[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -581,7 +582,7 @@ func TestMemcacheAdd(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 93dcf885..4b9575a1 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -1,9 +1,10 @@ package redis_test import ( - "github.com/envoyproxy/ratelimit/test/mocks/stats" "testing" + "github.com/envoyproxy/ratelimit/test/mocks/stats" + "github.com/coocood/freecache" "github.com/mediocregopher/radix/v3" @@ -66,7 +67,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -91,7 +92,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) @@ -119,8 +120,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false)} assert.Equal( 
[]*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -197,7 +198,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -217,7 +218,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -237,7 +238,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -255,7 +256,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -292,7 +293,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, 
DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -308,7 +309,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -325,7 +326,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -343,7 +344,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -360,7 +361,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -377,7 +378,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: 
pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -394,7 +395,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -411,7 +412,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(7), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -428,7 +429,7 @@ func TestNearLimit(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(3), limits[0].Stats.OverLimit.Value()) @@ -458,7 +459,7 @@ func TestRedisWithJitter(t *testing.T) { limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 64c44ba1..6c38583a 100644 --- a/test/service/ratelimit_test.go +++ 
b/test/service/ratelimit_test.go @@ -1,11 +1,15 @@ package ratelimit_test import ( - "github.com/envoyproxy/ratelimit/src/stats" "math" + "os" "sync" "testing" + "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/utils" + + core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" @@ -60,8 +64,15 @@ type rateLimitServiceTestSuite struct { runtimeUpdateCallback chan<- int statsManager stats.Manager statStore gostats.Store + mockClock utils.TimeSource +} + +type MockClock struct { + now int64 } +func (c MockClock) UnixNow() int64 { return c.now } + func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret := rateLimitServiceTestSuite{} ret.assert = assert.New(t) @@ -87,7 +98,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe this.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Return(this.config) - return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statsManager, true) + return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statsManager, true, MockClock{now: int64(2222)}) } func TestService(test *testing.T) { @@ -176,6 +187,110 @@ func TestService(test *testing.T) { t.assert.EqualValues(1, t.statStore.NewCounter("config_load_error").Value()) } +func TestServiceWithCustomRatelimitHeaders(test *testing.T) { + os.Setenv("LIMIT_RESPONSE_HEADERS_ENABLED", "true") + os.Setenv("LIMIT_LIMIT_HEADER", "A-Ratelimit-Limit") + os.Setenv("LIMIT_REMAINING_HEADER", "A-Ratelimit-Remaining") + os.Setenv("LIMIT_RESET_HEADER", "A-Ratelimit-Reset") + defer func() { + os.Unsetenv("LIMIT_RESPONSE_HEADERS_ENABLED") + os.Unsetenv("LIMIT_LIMIT_HEADER") + os.Unsetenv("LIMIT_REMAINING_HEADER") + os.Unsetenv("LIMIT_RESET_HEADER") + }() + + t := commonSetup(test) + defer t.controller.Finish() + service := t.setupBasicService() + + // Config reload. 
+ barrier := newBarrier() + t.configLoader.EXPECT().Load( + []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) + t.runtimeUpdateCallback <- 1 + barrier.wait() + + // Make request + request := common.NewRateLimitRequest( + "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false), + nil} + t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + + response, err := service.ShouldRateLimit(nil, request) + common.AssertProtoEqual( + t.assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OVER_LIMIT, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }, + ResponseHeadersToAdd: []*core.HeaderValue{ + {Key: "A-Ratelimit-Limit", Value: "10"}, + {Key: "A-Ratelimit-Remaining", Value: "0"}, + {Key: "A-Ratelimit-Reset", Value: "58"}, + }, + }, + response) + t.assert.Nil(err) +} + +func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { + os.Setenv("LIMIT_RESPONSE_HEADERS_ENABLED", "true") + defer func() { + os.Unsetenv("LIMIT_RESPONSE_HEADERS_ENABLED") + }() + + t := commonSetup(test) + defer t.controller.Finish() + service := t.setupBasicService() + + // Config reload. 
+ barrier := newBarrier() + t.configLoader.EXPECT().Load( + []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) + t.runtimeUpdateCallback <- 1 + barrier.wait() + + // Make request + request := common.NewRateLimitRequest( + "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false), + nil} + t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + + response, err := service.ShouldRateLimit(nil, request) + common.AssertProtoEqual( + t.assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OVER_LIMIT, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }, + ResponseHeadersToAdd: []*core.HeaderValue{ + {Key: "RateLimit-Limit", Value: "10"}, + {Key: "RateLimit-Remaining", Value: "0"}, + {Key: "RateLimit-Reset", Value: "58"}, + }, + }, + response) + t.assert.Nil(err) +} + func TestEmptyDomain(test *testing.T) { t := commonSetup(test) defer t.controller.Finish() @@ -233,7 +348,7 @@ func TestInitialLoadError(test *testing.T) { func([]config.RateLimitConfigToLoad, stats.Manager) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true, t.mockClock) request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.ShouldRateLimit(nil, request) From 1c1d46ec4abf505c5817d99af117519c8bb47b92 Mon Sep 17 00:00:00 2001 From: jespersoderlund Date: Tue, 5 Oct 2021 04:38:52 +0200 Subject: [PATCH 025/181] 293 shadowmode (#294) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Global ShadowMod Signed-off-by: Jesper Söderlund * Adding shadow-mode issue #293 Signed-off-by: Jesper Söderlund * Fix doc format Signed-off-by: Jesper Söderlund * Extended example to showcase shadow mode as well, build a docker image with the current rate limit, extend envoy config to display rate limit headers. 
Extend statsd to show shadow mode Signed-off-by: Jesper Söderlund * Fine tuning of docs and example Signed-off-by: Jesper Söderlund * Added integration tests, fixed some review comments Signed-off-by: Jesper Söderlund * Doc format fix Signed-off-by: Jesper Söderlund * gitignore for vscode Signed-off-by: Jesper Söderlund Co-authored-by: Jesper Söderlund --- .gitignore | 1 + Makefile | 6 + README.md | 92 +++++++++++- examples/envoy/proxy.yaml | 1 + examples/prom-statsd-exporter/conf.yaml | 10 ++ examples/ratelimit/config/example.yaml | 11 ++ integration-test/Dockerfile.tester | 7 + .../docker-compose-integration-test.yml | 111 ++++++++++++++ integration-test/run-all.sh | 15 ++ integration-test/scripts/simple-get.sh | 8 + integration-test/scripts/trigger-ratelimit.sh | 28 ++++ .../scripts/trigger-shadow-mode-key.sh | 25 ++++ src/config/config.go | 9 +- src/config/config_impl.go | 18 ++- src/limiter/base_limiter.go | 73 +++++---- src/redis/fixed_cache_impl.go | 13 +- src/service/ratelimit.go | 16 +- src/service_cmd/runner/runner.go | 1 + src/settings/settings.go | 3 + src/stats/manager.go | 8 +- src/stats/manager_impl.go | 2 + test/config/config_test.go | 81 +++++++++- test/config/shadowmode_config.yaml | 33 +++++ test/limiter/base_limiter_test.go | 90 +++++++++++- test/memcached/cache_impl_test.go | 34 ++--- test/metrics/metrics_test.go | 5 +- test/mocks/stats/manager.go | 3 + test/redis/bench_test.go | 2 +- test/redis/fixed_cache_impl_test.go | 128 ++++++++++++++-- test/service/ratelimit_test.go | 138 ++++++++++++++++-- 30 files changed, 871 insertions(+), 101 deletions(-) create mode 100644 integration-test/Dockerfile.tester create mode 100644 integration-test/docker-compose-integration-test.yml create mode 100755 integration-test/run-all.sh create mode 100755 integration-test/scripts/simple-get.sh create mode 100755 integration-test/scripts/trigger-ratelimit.sh create mode 100755 integration-test/scripts/trigger-shadow-mode-key.sh create mode 100644 test/config/shadowmode_config.yaml diff --git a/.gitignore b/.gitignore index 7897ead0..826a29f4 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ cover.out bin/ .idea/ +.vscode/ vendor cert.pem key.pem diff --git a/Makefile b/Makefile index ef6ffc0c..739d8757 100644 --- a/Makefile +++ b/Makefile @@ -113,3 +113,9 @@ docker_image: docker_tests .PHONY: docker_push docker_push: docker_image docker push $(IMAGE):$(VERSION) + +.PHONY: integration-tests +integration-tests: + docker-compose --project-dir $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester + +# docker-compose --project-dir $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester diff --git a/README.md b/README.md index 40691ab6..98861a14 100644 --- a/README.md +++ b/README.md @@ -9,24 +9,30 @@ - [Building and Testing](#building-and-testing) - [Docker-compose setup](#docker-compose-setup) - [Full test environment](#full-test-environment) + - [Self-contained end-to-end integration test](#self-contained-end-to-end-integration-test) - [Configuration](#configuration) - [The configuration format](#the-configuration-format) - [Definitions](#definitions) - [Descriptor list definition](#descriptor-list-definition) - [Rate limit definition](#rate-limit-definition) + - [ShadowMode](#shadowmode) - [Examples](#examples) - [Example 1](#example-1) - [Example 2](#example-2) - [Example 3](#example-3) - [Example 4](#example-4) - [Example 5](#example-5) + - [Example 6](#example-6) - [Loading 
Configuration](#loading-configuration)
 - [Log Format](#log-format)
 - [GRPC Keepalive](#grpc-keepalive)
 - [Request Fields](#request-fields)
 - [GRPC Client](#grpc-client)
   - [Commandline flags](#commandline-flags)
-- [Statistics](#statistics)
+- [Global ShadowMode](#global-shadowmode)
+  - [Configuration](#configuration-1)
+  - [Statistics](#statistics)
+- [Statistics](#statistics-1)
   - [Statistics options](#statistics-options)
 - [HTTP Port](#http-port)
   - [/json endpoint](#json-endpoint)
@@ -120,20 +126,40 @@ as explained in the [two redis instances](#two-redis-instances) section.
 ## Full test environment

 To run a fully configured environment to demo Envoy based rate limiting, run:

 ```bash
-docker-compose -f docker-compose-example.yml up
+docker-compose -f docker-compose-example.yml up --build --remove-orphans
 ```

 This will run ratelimit, redis, prom-statsd-exporter and two Envoy containers such that you can demo rate limiting by hitting the below endpoints.

 ```bash
 curl localhost:8888/test
 curl localhost:8888/header -H "foo: foo" # Header based
 curl localhost:8888/twoheader -H "foo: foo" -H "bar: bar" # Two headers
-curl localhost:8888/twoheader -H "foo: foo" -H "baz: baz"
+curl localhost:8888/twoheader -H "foo: foo" -H "baz: baz" # This will be rate limited
 curl localhost:8888/twoheader -H "foo: foo" -H "bar: banned" # Ban a particular header value
+curl localhost:8888/twoheader -H "foo: foo" -H "baz: shady" # This will never be ratelimited since "baz" with value "shady" is in shadow_mode
+curl localhost:8888/twoheader -H "foo: foo" -H "baz: not-so-shady" # This is subject to rate limiting because it's not in shadow_mode
 ```

 Edit `examples/ratelimit/config/example.yaml` to test different rate limit configs. Hot reloading is enabled.

 The descriptors in `example.yaml` and the actions in `examples/envoy/proxy.yaml` should give you a good idea on how to configure rate limits.

+To see the metrics in the example:
+
+```bash
+# The metrics for the shadow_mode keys
+curl http://localhost:9102/metrics | grep -i shadow
+```
+
+## Self-contained end-to-end integration test
+
+Integration tests are coded as bash scripts in `integration-test/scripts`.
+
+The test suite will spin up a docker-compose environment from `integration-test/docker-compose-integration-test.yml`.
+
+If the test suite fails, it will exit with code 1.
+
+```bash
+make integration-tests
+```
+
 # Configuration

 ## The configuration format
@@ -163,6 +189,7 @@ descriptors:
     rate_limit: (optional block)
       unit:
       requests_per_unit:
+    shadow_mode: (optional)
     descriptors: (optional block)
       - ... (nested repetition of above)
 ```
@@ -184,6 +211,15 @@ The rate limit block specifies the actual rate limit that will be used when ther
 Currently the service supports per second, minute, hour, and day limits. More types of limits may be added in the
 future based on user demand.

+### ShadowMode
+A `shadow_mode` key in a rule indicates that, whatever the outcome of the rule's evaluation, the end result will always be "OK".
+
+When a block is in ShadowMode, all functions of the rate limiting service are executed as normal, including cache lookups and statistics.
+
+An additional statistic is added to keep track of how many times a key with `shadow_mode` has overridden the result.
+
+There is also a Global Shadow Mode, described in [Global ShadowMode](#global-shadowmode) below.
+
 ### Examples

 #### Example 1
@@ -351,6 +387,39 @@ This can be useful for collecting statistics, or if one wants to define a descri

 The return value for unlimited descriptors will be an OK status code with the LimitRemaining field set to MaxUint32 value.
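For consumers of the response, here is a minimal Go sketch (a hypothetical client-side helper, not part of this patch) that distinguishes an unlimited descriptor by the MaxUint32 sentinel described above:

```go
package main

import (
	"math"

	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
)

// isUnlimited reports whether a descriptor status came from an "unlimited"
// rule: the service signals this with an OK code and LimitRemaining set to
// the maximum uint32 value.
func isUnlimited(s *pb.RateLimitResponse_DescriptorStatus) bool {
	return s.GetCode() == pb.RateLimitResponse_OK &&
		s.GetLimitRemaining() == math.MaxUint32
}
```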
+ ### Example 6
+
+ A rule using `shadow_mode` is useful for soft-launching rate limiting. In this example:
+
+```
+RateLimitRequest:
+  domain: example6
+  descriptor: ("service", "auth-service"),("user", "user-a")
+```
+
+`user-a` of the `auth-service` would not get rate-limited regardless of the rate of requests; there would, however, be statistics related to the breach of the configured limit of 10 req / sec.
+
+`user-b`, however, would be limited to 20 req / sec.
+
+```yaml
+domain: example6
+descriptors:
+  - key: service
+    descriptors:
+      - key: user
+        value: user-a
+        rate_limit:
+          requests_per_unit: 10
+          unit: second
+        shadow_mode: true
+      - key: user
+        value: user-b
+        rate_limit:
+          requests_per_unit: 20
+          unit: second
+```
+
 ## Loading Configuration

 The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors
@@ -431,6 +500,19 @@ go run main.go -domain test \
   -descriptors name=foo,age=14 -descriptors name=bar,age=18
 ```

+# Global ShadowMode
+
+There is a global shadow mode which can make it easier to introduce rate limiting into an existing service landscape. It will override whatever result is returned by the regular rate limiting process.
+
+## Configuration
+The global shadow mode is configured with an environment variable.
+
+Setting the environment variable `SHADOW_MODE` to `true` will enable the feature.
+
+## Statistics
+An additional service-level statistic is generated that will increment whenever the global shadow mode has overridden a rate limiting result.
+
+
 # Statistics

 The rate limit service generates various statistics for each configured rate limit rule that will be useful for end
@@ -454,6 +536,7 @@ STAT:
 * near_limit: Number of rule hits over the NearLimit ratio threshold (currently 80%) but under the threshold rate.
 * over_limit: Number of rule hits exceeding the threshold rate
 * total_hits: Number of rule hits in total
+* shadow_mode: Number of rule hits where shadow_mode would trigger and override the over_limit result

 To use a custom near_limit ratio threshold, you can specify with `NEAR_LIMIT_RATIO` environment variable. It defaults to `0.8` (0-1 scale).
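To make the `shadow_mode` counter concrete, here is a minimal Go sketch of the override this patch applies (simplified from the `base_limiter.go` change later in this patch; the free-standing helper and its name are illustrative, not the actual limiter code):

```go
package limiter

import (
	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"

	"github.com/envoyproxy/ratelimit/src/config"
)

// applyShadowMode flips an OVER_LIMIT decision to OK for a shadow-mode rule,
// while still recording the override in the rule's shadow_mode counter.
func applyShadowMode(limit *config.RateLimit, status *pb.RateLimitResponse_DescriptorStatus, hitsAddend uint64) {
	if limit.ShadowMode && status.Code == pb.RateLimitResponse_OVER_LIMIT {
		status.Code = pb.RateLimitResponse_OK  // the caller sees OK
		limit.Stats.ShadowMode.Add(hitsAddend) // emitted as <key>.shadow_mode
	}
}
```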
These are examples of generated stats for some configured rate limit rules from the above examples: @@ -464,6 +547,9 @@ ratelimit.service.rate_limit.mongo_cps.database_users.over_limit: 0 ratelimit.service.rate_limit.mongo_cps.database_users.total_hits: 2939 ratelimit.service.rate_limit.messaging.message_type_marketing.to_number.over_limit: 0 ratelimit.service.rate_limit.messaging.message_type_marketing.to_number.total_hits: 0 +ratelimit.service.rate_limit.messaging.auth-service.over_limit.total_hits: 1 +ratelimit.service.rate_limit.messaging.auth-service.over_limit.over_limit: 1 +ratelimit.service.rate_limit.messaging.auth-service.over_limit.shadow_mode: 1 ``` ## Statistics options diff --git a/examples/envoy/proxy.yaml b/examples/envoy/proxy.yaml index a260befe..1bf48c7e 100644 --- a/examples/envoy/proxy.yaml +++ b/examples/envoy/proxy.yaml @@ -55,6 +55,7 @@ static_resources: stage: 0 rate_limited_as_resource_exhausted: true failure_mode_deny: false + enable_x_ratelimit_headers: DRAFT_VERSION_03 rate_limit_service: grpc_service: envoy_grpc: diff --git a/examples/prom-statsd-exporter/conf.yaml b/examples/prom-statsd-exporter/conf.yaml index 4ce3f64a..f6649c2f 100644 --- a/examples/prom-statsd-exporter/conf.yaml +++ b/examples/prom-statsd-exporter/conf.yaml @@ -86,6 +86,16 @@ mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action. name: "ratelimit_service_config_load_error" match_metric_type: counter + - match: + "ratelimit.service.rate_limit.*.*.*.shadow_mode" + name: "ratelimit_service_rate_limit_shadow_mode" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + + # Enable below in production once you have the metrics you need # - match: "." # match_type: "regex" diff --git a/examples/ratelimit/config/example.yaml b/examples/ratelimit/config/example.yaml index 64bacdf0..39fd61f2 100644 --- a/examples/ratelimit/config/example.yaml +++ b/examples/ratelimit/config/example.yaml @@ -27,6 +27,17 @@ descriptors: rate_limit: unit: second requests_per_unit: 1 + - key: baz + value: not-so-shady + rate_limit: + unit: minute + requests_per_unit: 3 + - key: baz + value: shady + rate_limit: + unit: minute + requests_per_unit: 3 + shadow_mode: true - key: bay rate_limit: unlimited: true diff --git a/integration-test/Dockerfile.tester b/integration-test/Dockerfile.tester new file mode 100644 index 00000000..ff4a5c8b --- /dev/null +++ b/integration-test/Dockerfile.tester @@ -0,0 +1,7 @@ +FROM alpine:latest + +USER root + +RUN apk update && apk upgrade && apk add bash curl sed grep + +ENTRYPOINT [ "bash" ] diff --git a/integration-test/docker-compose-integration-test.yml b/integration-test/docker-compose-integration-test.yml new file mode 100644 index 00000000..7cfd2eb9 --- /dev/null +++ b/integration-test/docker-compose-integration-test.yml @@ -0,0 +1,111 @@ +version: "3" +services: + redis: + image: redis:alpine + expose: + - 6379 + ports: + - 6379:6379 + networks: + - ratelimit-network + + statsd: + image: prom/statsd-exporter:v0.18.0 + entrypoint: /bin/statsd_exporter + command: + - "--statsd.mapping-config=/etc/statsd-exporter/conf.yaml" + expose: + - 9125 + - 9102 + ports: + - 9125:9125 + - 9102:9102 # Visit http://localhost:9102/metrics to see metrics in Prometheus format + networks: + - ratelimit-network + volumes: + - ./examples/prom-statsd-exporter/conf.yaml:/etc/statsd-exporter/conf.yaml + + ratelimit: + build: + context: ${PWD} + dockerfile: Dockerfile + command: /bin/ratelimit + ports: + - 8080:8080 + - 8081:8081 + - 6070:6070 + depends_on: + - 
redis + - statsd + networks: + - ratelimit-network + volumes: + - ./examples/ratelimit/config:/data/ratelimit/config + environment: + - USE_STATSD=true + - STATSD_HOST=statsd + - STATSD_PORT=9125 + - LOG_LEVEL=debug + - REDIS_SOCKET_TYPE=tcp + - REDIS_URL=redis:6379 + - RUNTIME_ROOT=/data + - RUNTIME_SUBDIRECTORY=ratelimit + - RUNTIME_WATCH_ROOT=false + + envoy-proxy: + image: envoyproxy/envoy-dev:latest + entrypoint: "/usr/local/bin/envoy" + command: + - "--service-node proxy" + - "--service-cluster proxy" + - "--config-path /etc/envoy/envoy.yaml" + - "--concurrency 1" + - "--mode serve" + - "--log-level info" + depends_on: + - ratelimit + volumes: + - ./examples/envoy/proxy.yaml:/etc/envoy/envoy.yaml + networks: + - ratelimit-network + expose: + - "8888" + - "8001" + ports: + - "8888:8888" + - "8001:8001" + + envoy-mock: + image: envoyproxy/envoy-dev:latest + entrypoint: "/usr/local/bin/envoy" + command: + - "--service-node mock" + - "--service-cluster mock" + - "--config-path /etc/envoy/envoy.yaml" + - "--concurrency 1" + - "--mode serve" + - "--log-level info" + volumes: + - ./examples/envoy/mock.yaml:/etc/envoy/envoy.yaml + networks: + - ratelimit-network + expose: + - "9999" + ports: + - "9999:9999" + + tester: + build: + context: ${PWD} + dockerfile: integration-test/Dockerfile.tester + depends_on: + - envoy-proxy + - envoy-mock + command: /test/run-all.sh + volumes: + - ${PWD}/integration-test/:/test/ + networks: + - ratelimit-network + +networks: + ratelimit-network: diff --git a/integration-test/run-all.sh b/integration-test/run-all.sh new file mode 100755 index 00000000..58987603 --- /dev/null +++ b/integration-test/run-all.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +echo "Running tests" + +FILES=/test/scripts/* +for f in $FILES +do + echo "Processing $f file..." + # take action on each file. $f store current file name + $f + if [ $? -ne 0 ] ; then + echo "Failed file $f" + exit 1 + fi +done \ No newline at end of file diff --git a/integration-test/scripts/simple-get.sh b/integration-test/scripts/simple-get.sh new file mode 100755 index 00000000..54233885 --- /dev/null +++ b/integration-test/scripts/simple-get.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# Just happy path +curl -s -f -H "foo: test" -H "baz: shady" http://envoy-proxy:8888/twoheader + +if [ $? -ne 0 ] ; then + exit 1 +fi \ No newline at end of file diff --git a/integration-test/scripts/trigger-ratelimit.sh b/integration-test/scripts/trigger-ratelimit.sh new file mode 100755 index 00000000..49b36ada --- /dev/null +++ b/integration-test/scripts/trigger-ratelimit.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# +# descriptor: (foo: *), (baz: not-so-shady) +# Has rate limit quota 3 req / min +# + +response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader` +response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader` +response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader` + +if [ $? -ne 0 ] ; then + echo "Rate limit should not trigger yet" + exit 1 +fi + +response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader` + +if [ $? -eq 0 ] ; then + echo "Rate limiting should fail the request" + exit 1 +fi + +response=`curl -i -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader | grep "Too Many Requests"` +if [ $? 
-ne 0 ] ; then + echo "This should trigger a ratelimit" + exit 1 +fi diff --git a/integration-test/scripts/trigger-shadow-mode-key.sh b/integration-test/scripts/trigger-shadow-mode-key.sh new file mode 100755 index 00000000..b72ec7d5 --- /dev/null +++ b/integration-test/scripts/trigger-shadow-mode-key.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# +# descriptor: (foo: *), (baz: shady) +# Has rate limit quota 3 req / min +# shadow_mode is true +# + +response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` +response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` +response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` +response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` +response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` + +if [ $? -ne 0 ] ; then + echo "Shadow Mode key should not trigger an error, even if we have exceeded the quota" + exit 1 +fi + +remaining=`curl -i -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader | grep x-ratelimit-remaining | cut -d: -f2 | cut -d: -f2 | sed 's/ //g'` + +if [ $remaining == "0" ] ; then + echo "Remaining should be zero" + exit 1 +fi diff --git a/src/config/config.go b/src/config/config.go index c7f581e3..d174f100 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -16,10 +16,11 @@ func (e RateLimitConfigError) Error() string { // Wrapper for an individual rate limit config entry which includes the defined limit and stats. type RateLimit struct { - FullKey string - Stats stats.RateLimitStats - Limit *pb.RateLimitResponse_RateLimit - Unlimited bool + FullKey string + Stats stats.RateLimitStats + Limit *pb.RateLimitResponse_RateLimit + Unlimited bool + ShadowMode bool } // Interface for interacting with a loaded rate limit config. diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 0664e5b2..aca94425 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -23,6 +23,7 @@ type yamlDescriptor struct { Value string RateLimit *yamlRateLimit `yaml:"rate_limit"` Descriptors []yamlDescriptor + ShadowMode bool `yaml:"shadow_mode"` } type yamlRoot struct { @@ -53,6 +54,7 @@ var validKeys = map[string]bool{ "unit": true, "requests_per_unit": true, "unlimited": true, + "shadow_mode": true, } // Create a new rate limit config entry. @@ -62,9 +64,9 @@ var validKeys = map[string]bool{ // @param unlimited supplies whether the rate limit is unlimited // @return the new config entry. func NewRateLimit( - requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, unlimited bool) *RateLimit { + requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, unlimited bool, shadowMode bool) *RateLimit { - return &RateLimit{FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, Unlimited: unlimited} + return &RateLimit{FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, Unlimited: unlimited, ShadowMode: shadowMode} } // Dump an individual descriptor for debugging purposes. 
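For orientation on the signature change above, a hypothetical call site under the new `NewRateLimit` (mirroring how this patch's tests call it; the wrapping function is illustrative only):

```go
package config_test

import (
	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"

	"github.com/envoyproxy/ratelimit/src/config"
	"github.com/envoyproxy/ratelimit/src/stats"
)

// newShadowLimit builds a rule using the new trailing shadowMode flag;
// the boolean before it remains the existing "unlimited" flag.
func newShadowLimit(sm stats.Manager) *config.RateLimit {
	return config.NewRateLimit(
		10,                                    // requests_per_unit
		pb.RateLimitResponse_RateLimit_SECOND, // unit
		sm.NewStats("key_value"),              // per-rule stats
		false,                                 // unlimited
		true,                                  // shadow_mode
	)
}
```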
@@ -72,8 +74,8 @@ func (this *rateLimitDescriptor) dump() string { ret := "" if this.limit != nil { ret += fmt.Sprintf( - "%s: unit=%s requests_per_unit=%d\n", this.limit.FullKey, - this.limit.Limit.Unit.String(), this.limit.Limit.RequestsPerUnit) + "%s: unit=%s requests_per_unit=%d, shadow_mode: %t\n", this.limit.FullKey, + this.limit.Limit.Unit.String(), this.limit.Limit.RequestsPerUnit, this.limit.ShadowMode) } for _, descriptor := range this.descriptors { ret += descriptor.dump() @@ -134,10 +136,10 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p } rateLimit = NewRateLimit( - descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey), unlimited) + descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey), unlimited, descriptorConfig.ShadowMode) rateLimitDebugString = fmt.Sprintf( - " ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t}", rateLimit.Limit.RequestsPerUnit, - rateLimit.Limit.Unit.String(), rateLimit.Unlimited) + " ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t, shadow_mode=%t}", rateLimit.Limit.RequestsPerUnit, + rateLimit.Limit.Unit.String(), rateLimit.Unlimited, rateLimit.ShadowMode) } logger.Debugf( @@ -252,10 +254,12 @@ func (this *rateLimitConfigImpl) GetLimit( if descriptor.GetLimit() != nil { rateLimitKey := descriptorKey(domain, descriptor) rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit()) + // When limit override is provided by envoy config, we don't want to enable shadow_mode rateLimit = NewRateLimit( descriptor.GetLimit().GetRequestsPerUnit(), rateLimitOverrideUnit, this.statsManager.NewStats(rateLimitKey), + false, false) return rateLimit } diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 6a9ee927..cbdde93a 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -76,45 +76,58 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, nil, 0) } + var responseDescriptorStatus *pb.RateLimitResponse_DescriptorStatus + over_limit := false if isOverLimitWithLocalCache { + over_limit = true limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) limitInfo.limit.Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) - return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, - limitInfo.limit.Limit, 0) - } - var responseDescriptorStatus *pb.RateLimitResponse_DescriptorStatus - limitInfo.overLimitThreshold = limitInfo.limit.Limit.RequestsPerUnit - // The nearLimitThreshold is the number of requests that can be made before hitting the nearLimitRatio. - // We need to know it in both the OK and OVER_LIMIT scenarios. - limitInfo.nearLimitThreshold = uint32(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio))) - logger.Debugf("cache key: %s current: %d", key, limitInfo.limitAfterIncrease) - if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold { responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) - - this.checkOverLimitThreshold(limitInfo, hitsAddend) - - if this.localCache != nil { - // Set the TTL of the local_cache to be the entire duration. 
- // Since the cache_key gets changed once the time crosses over current time slot, the over-the-limit - // cache keys in local_cache lose effectiveness. - // For example, if we have an hour limit on all mongo connections, the cache key would be - // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start - // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). - // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. - err := this.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit))) - if err != nil { - logger.Errorf("Failing to set local cache key: %s", key) + } else { + limitInfo.overLimitThreshold = limitInfo.limit.Limit.RequestsPerUnit + // The nearLimitThreshold is the number of requests that can be made before hitting the nearLimitRatio. + // We need to know it in both the OK and OVER_LIMIT scenarios. + limitInfo.nearLimitThreshold = uint32(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio))) + logger.Debugf("cache key: %s current: %d", key, limitInfo.limitAfterIncrease) + if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold { + over_limit = true + responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, + limitInfo.limit.Limit, 0) + + this.checkOverLimitThreshold(limitInfo, hitsAddend) + + if this.localCache != nil { + // Set the TTL of the local_cache to be the entire duration. + // Since the cache_key gets changed once the time crosses over current time slot, the over-the-limit + // cache keys in local_cache lose effectiveness. + // For example, if we have an hour limit on all mongo connections, the cache key would be + // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start + // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). + // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. + err := this.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit))) + if err != nil { + logger.Errorf("Failing to set local cache key: %s", key) + } } + } else { + responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, + limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease) + + // The limit is OK but we additionally want to know if we are near the limit. + this.checkNearLimitThreshold(limitInfo, hitsAddend) + limitInfo.limit.Stats.WithinLimit.Add(uint64(hitsAddend)) } - } else { - responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, - limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease) + } - // The limit is OK but we additionally want to know if we are near the limit. 
- this.checkNearLimitThreshold(limitInfo, hitsAddend) - limitInfo.limit.Stats.WithinLimit.Add(uint64(hitsAddend)) + // If the limit is in ShadowMode, it should be always return OK + // We only want to increase stats if the limit was actually over the limit + if over_limit && limitInfo.limit.ShadowMode { + logger.Debugf("Limit with key %s, is in shadow_mode", limitInfo.limit.FullKey) + responseDescriptorStatus.Code = pb.RateLimitResponse_OK + limitInfo.limit.Stats.ShadowMode.Add(uint64(hitsAddend)) } + return responseDescriptorStatus } diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index d364f4ea..6997e5dc 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -1,9 +1,10 @@ package redis import ( - "github.com/envoyproxy/ratelimit/src/stats" "math/rand" + "github.com/envoyproxy/ratelimit/src/stats" + "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" @@ -53,8 +54,14 @@ func (this *fixedRateLimitCacheImpl) DoLimit( // Check if key is over the limit in local cache. if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { - isOverLimitWithLocalCache[i] = true - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + + if limits[i].ShadowMode { + logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) + } else { + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + isOverLimitWithLocalCache[i] = true + } + continue } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index b4c2f15e..67004ecf 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -9,6 +9,7 @@ import ( "github.com/envoyproxy/ratelimit/src/settings" "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/utils" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -41,6 +42,7 @@ type service struct { customHeaderRemainingHeader string customHeaderResetHeader string customHeaderClock utils.TimeSource + globalShadowMode bool } func (this *service) reloadConfig(statsManager stats.Manager) { @@ -72,6 +74,7 @@ func (this *service) reloadConfig(statsManager stats.Manager) { this.configLock.Lock() this.config = newConfig rlSettings := settings.NewSettings() + this.globalShadowMode = rlSettings.GlobalShadowMode if rlSettings.RateLimitResponseHeadersEnabled { this.customHeadersEnabled = true @@ -124,9 +127,10 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co logger.Debugf("descriptor is unlimited, not passing to the cache") } else { logger.Debugf( - "applying limit: %d requests per %s", + "applying limit: %d requests per %s, shadow_mode: %t", limitsToCheck[i].Limit.RequestsPerUnit, limitsToCheck[i].Limit.Unit.String(), + limitsToCheck[i].ShadowMode, ) } } @@ -195,6 +199,12 @@ func (this *service) shouldRateLimitWorker( } } + // If there is a global shadow_mode, it should always return OK + if finalCode == pb.RateLimitResponse_OVER_LIMIT && this.globalShadowMode { + finalCode = pb.RateLimitResponse_OK + this.stats.GlobalShadowMode.Inc() + } + response.OverallCode = finalCode return response } @@ -257,6 +267,7 @@ func (this *service) ShouldRateLimit( response := this.shouldRateLimitWorker(ctx, request) logger.Debugf("returning normal response") + return response, nil } @@ -267,7 +278,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { } func NewService(runtime loader.IFace, cache 
limiter.RateLimitCache, - configLoader config.RateLimitConfigLoader, statsManager stats.Manager, runtimeWatchRoot bool, clock utils.TimeSource) RateLimitServiceServer { + configLoader config.RateLimitConfigLoader, statsManager stats.Manager, runtimeWatchRoot bool, clock utils.TimeSource, shadowMode bool) RateLimitServiceServer { newService := &service{ runtime: runtime, @@ -278,6 +289,7 @@ func NewService(runtime loader.IFace, cache limiter.RateLimitCache, cache: cache, stats: statsManager.NewServiceStats(), runtimeWatchRoot: runtimeWatchRoot, + globalShadowMode: shadowMode, customHeaderClock: clock, } diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index d88d34ca..54d8d570 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -109,6 +109,7 @@ func (runner *Runner) Run() { runner.statsManager, s.RuntimeWatchRoot, utils.NewTimeSourceImpl(), + s.GlobalShadowMode, ) srv.AddDebugHttpEndpoint( diff --git a/src/settings/settings.go b/src/settings/settings.go index 7f0425db..c9afb210 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -99,6 +99,9 @@ type Settings struct { MemcacheMaxIdleConns int `envconfig:"MEMCACHE_MAX_IDLE_CONNS" default:"2"` MemcacheSrv string `envconfig:"MEMCACHE_SRV" default:""` MemcacheSrvRefresh time.Duration `envconfig:"MEMCACHE_SRV_REFRESH" default:"0"` + + // Should the ratelimiting be running in Global shadow-mode, ie. never report a ratelimit status, unless a rate was provided from envoy as an override + GlobalShadowMode bool `envconfig:"SHADOW_MODE" default:"false"` } type Option func(*Settings) diff --git a/src/stats/manager.go b/src/stats/manager.go index 6c54e474..cab7dc33 100644 --- a/src/stats/manager.go +++ b/src/stats/manager.go @@ -1,7 +1,9 @@ package stats -import stats "github.com/lyft/gostats" -import gostats "github.com/lyft/gostats" +import ( + gostats "github.com/lyft/gostats" + stats "github.com/lyft/gostats" +) // Manager is the interface that wraps initialization of stat structures. type Manager interface { @@ -38,6 +40,7 @@ type ServiceStats struct { ConfigLoadSuccess gostats.Counter ConfigLoadError gostats.Counter ShouldRateLimit ShouldRateLimitStats + GlobalShadowMode gostats.Counter } // Stats for an individual rate limit config entry. 
@@ -48,4 +51,5 @@ type RateLimitStats struct { NearLimit gostats.Counter OverLimitWithLocalCache gostats.Counter WithinLimit gostats.Counter + ShadowMode gostats.Counter } diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index e7b6a0b1..7748dde7 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -32,6 +32,7 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats { ret.NearLimit = this.rlStatsScope.NewCounter(key + ".near_limit") ret.OverLimitWithLocalCache = this.rlStatsScope.NewCounter(key + ".over_limit_with_local_cache") ret.WithinLimit = this.rlStatsScope.NewCounter(key + ".within_limit") + ret.ShadowMode = this.rlStatsScope.NewCounter(key + ".shadow_mode") return ret } @@ -47,6 +48,7 @@ func (this *ManagerImpl) NewServiceStats() ServiceStats { ret.ConfigLoadSuccess = this.serviceStatsScope.NewCounter("config_load_success") ret.ConfigLoadError = this.serviceStatsScope.NewCounter("config_load_error") ret.ShouldRateLimit = this.NewShouldRateLimitStats() + ret.GlobalShadowMode = this.serviceStatsScope.NewCounter("global_shadow_mode") return ret } diff --git a/test/config/config_test.go b/test/config/config_test.go index d6d79c5e..3caaf2cc 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -1,16 +1,17 @@ package config_test import ( - "github.com/envoyproxy/ratelimit/test/common" "io/ioutil" "testing" + "github.com/envoyproxy/ratelimit/test/common" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/envoyproxy/ratelimit/src/config" mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" - "github.com/lyft/gostats" + stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" ) @@ -386,3 +387,79 @@ func TestUnlimitedWithRateLimitUnit(t *testing.T) { }, "unlimited_with_unit.yaml: should not specify rate limit unit when unlimited") } + +func TestShadowModeConfig(t *testing.T) { + assert := assert.New(t) + stats := stats.NewStore(stats.NewNullSink(), false) + + rlConfig := config.NewRateLimitConfigImpl(loadFile("shadowmode_config.yaml"), mockstats.NewMockStatManager(stats)) + rlConfig.Dump() + + rl := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something"}}, + }) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + + assert.Equal(rl.ShadowMode, false) + assert.EqualValues(5, rl.Limit.RequestsPerUnit) + assert.Equal(pb.RateLimitResponse_RateLimit_SECOND, rl.Limit.Unit) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.over_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.near_limit").Value()) + assert.EqualValues(0, stats.NewCounter("test-domain.key1_value1.subkey1.shadow_mode").Value()) + + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "subvalue1"}}, + }) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + rl.Stats.ShadowMode.Inc() + + assert.Equal(rl.ShadowMode, true) + assert.EqualValues(10, rl.Limit.RequestsPerUnit) + 
assert.Equal(pb.RateLimitResponse_RateLimit_SECOND, rl.Limit.Unit) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.over_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.near_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.shadow_mode").Value()) + + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "something"}}, + }) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + rl.Stats.ShadowMode.Inc() + assert.Equal(rl.ShadowMode, true) + assert.EqualValues(20, rl.Limit.RequestsPerUnit) + assert.Equal(pb.RateLimitResponse_RateLimit_MINUTE, rl.Limit.Unit) + assert.EqualValues(1, stats.NewCounter("test-domain.key2.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key2.over_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key2.near_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key2.shadow_mode").Value()) + + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "value2"}}, + }) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + assert.Equal(rl.ShadowMode, false) + assert.EqualValues(30, rl.Limit.RequestsPerUnit) + assert.Equal(pb.RateLimitResponse_RateLimit_MINUTE, rl.Limit.Unit) + assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.over_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.near_limit").Value()) + assert.EqualValues(0, stats.NewCounter("test-domain.key2_value2.shadow_mode").Value()) +} diff --git a/test/config/shadowmode_config.yaml b/test/config/shadowmode_config.yaml new file mode 100644 index 00000000..5b7bb904 --- /dev/null +++ b/test/config/shadowmode_config.yaml @@ -0,0 +1,33 @@ +# Basic configuration for testing. +domain: test-domain +descriptors: + # Top level key/value with no default rate limit. + - key: key1 + value: value1 + descriptors: + # 2nd level key only with default rate limit. + - key: subkey1 + rate_limit: + unit: second + requests_per_unit: 5 + + # 2nd level key/value with limit. Specific override at 2nd level. + - key: subkey1 + value: subvalue1 + rate_limit: + unit: second + requests_per_unit: 10 + shadow_mode: true + + # Top level key only with default rate limit. + - key: key2 + rate_limit: + unit: minute + requests_per_unit: 20 + shadow_mode: true + # Top level key/value with limit. Specific override at 1st level. 
+ - key: key2 + value: value2 + rate_limit: + unit: minute + requests_per_unit: 30 diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index 97fc97db..76e9df65 100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -1,10 +1,11 @@ package limiter import ( - mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" "math/rand" "testing" + mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" + "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" @@ -27,7 +28,7 @@ func TestGenerateCacheKeys(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -46,7 +47,7 @@ func TestGenerateCacheKeysPrefix(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -100,7 +101,7 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. 
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) @@ -109,6 +110,32 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) { assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimitWithLocalCache.Value()) + // No shadow_mode so, no stats change + assert.Equal(uint64(0), limits[0].Stats.ShadowMode.Value()) +} + +func TestGetResponseStatusOverLimitWithLocalCacheShadowMode(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + timeSource := mock_utils.NewMockTimeSource(controller) + timeSource.EXPECT().UnixNow().Return(int64(1234)) + statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := mockstats.NewMockStatManager(statsStore) + baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) + // This limit is in ShadowMode + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true)} + limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) + // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. + responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) + // Limit is reached, but response is still OK due to ShadowMode + assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) + assert.Equal(uint32(0), responseStatus.GetLimitRemaining()) + assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) + assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) + // ShadowMode statistics should also be updated + assert.Equal(uint64(2), limits[0].Stats.ShadowMode.Value()) + assert.Equal(uint64(2), limits[0].Stats.OverLimitWithLocalCache.Value()) } func TestGetResponseStatusOverLimit(t *testing.T) { @@ -121,7 +148,7 @@ func TestGetResponseStatusOverLimit(t *testing.T) { localCache := freecache.NewCache(100) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) @@ -132,6 +159,32 @@ func TestGetResponseStatusOverLimit(t *testing.T) { assert.Equal("", string(result)) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + // No shadow_mode so, no stats change + assert.Equal(uint64(0), limits[0].Stats.ShadowMode.Value()) +} + +func TestGetResponseStatusOverLimitShadowMode(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + timeSource := mock_utils.NewMockTimeSource(controller) + timeSource.EXPECT().UnixNow().Return(int64(1234)) + statsStore := stats.NewStore(stats.NewNullSink(), false) + localCache := freecache.NewCache(100) + sm := mockstats.NewMockStatManager(statsStore) + baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) + // Key is in shadow_mode: true + 
limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true)} + limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) + responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) + assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) + assert.Equal(uint32(0), responseStatus.GetLimitRemaining()) + assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) + result, _ := localCache.Get([]byte("key")) + // Local cache should have been populated with over the limit key. + assert.Equal("", string(result)) + assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) } func TestGetResponseStatusBelowLimit(t *testing.T) { @@ -143,7 +196,29 @@ func TestGetResponseStatusBelowLimit(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) + responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) + assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) + assert.Equal(uint32(4), responseStatus.GetLimitRemaining()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) + assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value()) + // No shadow_mode so, no stats change + assert.Equal(uint64(0), limits[0].Stats.ShadowMode.Value()) + +} + +func TestGetResponseStatusBelowLimitShadowMode(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + timeSource := mock_utils.NewMockTimeSource(controller) + timeSource.EXPECT().UnixNow().Return(int64(1234)) + statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := mockstats.NewMockStatManager(statsStore) + baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) @@ -151,4 +226,7 @@ func TestGetResponseStatusBelowLimit(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value()) + // No shadow_mode so, no stats change + assert.Equal(uint64(0), limits[0].Stats.ShadowMode.Value()) + } diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index 2b69ecfe..f29a6295 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -47,7 +47,7 @@ func TestMemcached(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := 
[]*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -71,7 +71,7 @@ func TestMemcached(t *testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, @@ -101,8 +101,8 @@ func TestMemcached(t *testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, @@ -138,7 +138,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -156,7 +156,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value1_1234", uint64(1)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value1"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -229,7 +229,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, 
pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -325,7 +325,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -378,7 +378,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key5_value5_1234", uint64(3)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -396,7 +396,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key6_value6_1234", uint64(2)).Return(uint64(7), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -414,7 +414,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key7_value7_1234", uint64(3)).Return(uint64(19), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -432,7 +432,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key8_value8_1234", uint64(3)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -450,7 +450,7 @@ func 
TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key9_value9_1234", uint64(7)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -468,7 +468,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key10_value10_1234", uint64(3)).Return(uint64(30), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -512,7 +512,7 @@ func TestMemcacheWithJitter(t *testing.T) { ).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -555,7 +555,7 @@ func TestMemcacheAdd(t *testing.T) { uint64(2), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -579,7 +579,7 @@ func TestMemcacheAdd(t *testing.T) { ).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, diff --git a/test/metrics/metrics_test.go b/test/metrics/metrics_test.go index 318db259..04ffbd68 100644 --- a/test/metrics/metrics_test.go +++ b/test/metrics/metrics_test.go @@ -2,13 +2,14 @@ package 
metrics import ( "context" + "testing" + "time" + "github.com/envoyproxy/ratelimit/src/metrics" stats "github.com/lyft/gostats" statsMock "github.com/lyft/gostats/mock" "github.com/stretchr/testify/assert" "google.golang.org/grpc" - "testing" - "time" ) func TestMetricsInterceptor(t *testing.T) { diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index cbf5bbf1..c0045b14 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -27,6 +27,7 @@ func (m *MockStatManager) NewServiceStats() stats.ServiceStats { ret.ConfigLoadSuccess = m.store.NewCounter("config_load_success") ret.ConfigLoadError = m.store.NewCounter("config_load_error") ret.ShouldRateLimit = m.NewShouldRateLimitStats() + ret.GlobalShadowMode = m.store.NewCounter("global_shadow_mode") return ret } @@ -39,6 +40,8 @@ func (m *MockStatManager) NewStats(key string) stats.RateLimitStats { ret.NearLimit = m.store.NewCounter(key + ".near_limit") ret.OverLimitWithLocalCache = m.store.NewCounter(key + ".over_limit_with_local_cache") ret.WithinLimit = m.store.NewCounter(key + ".within_limit") + ret.ShadowMode = m.store.NewCounter(key + ".shadow_mode") + return ret } diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index bfb003ba..b5376e63 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -49,7 +49,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} // wait for the pool to fill up for { diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 4b9575a1..b13cd946 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -64,7 +64,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -89,7 +89,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, @@ -116,8 +116,8 @@ func 
testRedis(usePerSecondRedis bool) func(*testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, @@ -194,7 +194,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -289,7 +289,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -341,7 +341,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -358,7 +358,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -375,7 +375,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false)} assert.Equal( 
[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -392,7 +392,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -409,7 +409,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -426,7 +426,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -456,7 +456,7 @@ func TestRedisWithJitter(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -466,3 +466,105 @@ func TestRedisWithJitter(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value()) } + +func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + client := mock_redis.NewMockClient(controller) + timeSource := mock_utils.NewMockTimeSource(controller) + localCache := freecache.NewCache(100) + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sm := stats.NewMockStatManager(statsStore) + cache := 
redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm) + sink := &common.TestStatSink{} + localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + + // Test Near Limit Stats. Under Near Limit Ratio + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) + + limits := []*config.RateLimit{ + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true)} + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value()) + + // Check the local cache stats. + testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) + + // Test Near Limit Stats. At Near Limit Ratio, still OK + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) + + // Check the local cache stats. 
+ testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) + + // Test Over limit stats + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + // The result should be OK since limit is in ShadowMode + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) + + // Check the local cache stats. + testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) + + // Test Over limit stats with local cache + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) + + // The result should be OK since limit is in ShadowMode + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + cache.DoLimit(nil, request, limits)) + // TODO: How should we handle statistics? Should there be a separate ShadowMode statistics? Should the other Stats remain as if they were unaffected by shadowmode? + assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(3), limits[0].Stats.WithinLimit.Value()) + + // Check the local cache stats. 
+ testLocalCacheStats(localCacheStats, statsStore, sink, 1, 3, 4, 0, 1) +} diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 6c38583a..77e5bf95 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/utils" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" @@ -98,7 +99,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe this.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Return(this.config) - return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statsManager, true, MockClock{now: int64(2222)}) + return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statsManager, true, MockClock{now: int64(2222)}, false) } func TestService(test *testing.T) { @@ -133,7 +134,7 @@ func TestService(test *testing.T) { request = common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), nil} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -165,7 +166,7 @@ func TestService(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false)} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( @@ -185,6 +186,125 @@ func TestService(test *testing.T) { t.assert.EqualValues(2, t.statStore.NewCounter("config_load_success").Value()) t.assert.EqualValues(1, t.statStore.NewCounter("config_load_error").Value()) + t.assert.EqualValues(0, t.statStore.NewCounter("global_shadow_mode").Value()) +} + +func TestServiceGlobalShadowMode(test *testing.T) { + os.Setenv("SHADOW_MODE", "true") + defer func() { + os.Unsetenv("SHADOW_MODE") + }() + + t := commonSetup(test) + defer t.controller.Finish() + + // No explicit global shadow_mode here; it should be picked up from environment variables during config reload + service := t.setupBasicService() + + // Force a config reload. + barrier := newBarrier() + t.configLoader.EXPECT().Load( + []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) + t.runtimeUpdateCallback <- 1 + barrier.wait() + + // Make a request.
+ request := common.NewRateLimitRequest( + "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) + + // Global Shadow mode + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), + nil} + t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + response, err := service.ShouldRateLimit(nil, request) + + // OK overall code even if limit response was OVER_LIMIT + common.AssertProtoEqual( + t.assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }}, + response) + t.assert.Nil(err) + + t.assert.EqualValues(1, t.statStore.NewCounter("global_shadow_mode").Value()) + t.assert.EqualValues(2, t.statStore.NewCounter("config_load_success").Value()) + t.assert.EqualValues(0, t.statStore.NewCounter("config_load_error").Value()) +} + +func TestRuleShadowMode(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + + // No Global Shadowmode + service := t.setupBasicService() + + request := common.NewRateLimitRequest( + "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true)} + t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + response, err := service.ShouldRateLimit(nil, request) + t.assert.Equal( + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }}, + response) + t.assert.Nil(err) + + t.assert.EqualValues(0, t.statStore.NewCounter("global_shadow_mode").Value()) +} + +func TestMixedRuleShadowMode(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + service := t.setupBasicService() + + request := common.NewRateLimitRequest( + "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false)} + t.config.EXPECT().GetLimit(nil, "different-domain", 
request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) + testResults := []pb.RateLimitResponse_Code{pb.RateLimitResponse_OVER_LIMIT, pb.RateLimitResponse_OVER_LIMIT} + for i := 0; i < len(limits); i++ { + if limits[i].ShadowMode { + testResults[i] = pb.RateLimitResponse_OK + } + } + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: testResults[0], CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: testResults[1], CurrentLimit: nil, LimitRemaining: 0}}) + response, err := service.ShouldRateLimit(nil, request) + t.assert.Equal( + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OVER_LIMIT, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: nil, LimitRemaining: 0}, + }}, + response) + t.assert.Nil(err) + + t.assert.EqualValues(0, t.statStore.NewCounter("global_shadow_mode").Value()) } func TestServiceWithCustomRatelimitHeaders(test *testing.T) { @@ -215,7 +335,7 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), nil} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -264,7 +384,7 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), nil} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -321,7 +441,7 @@ func TestCacheError(test *testing.T) { service := t.setupBasicService() request := common.NewRateLimitRequest("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false)} t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(nil, request, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { @@ -348,7 +468,7 @@ func TestInitialLoadError(test *testing.T) { func([]config.RateLimitConfigToLoad, stats.Manager) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true, t.mockClock) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true, t.mockClock, false) request := common.NewRateLimitRequest("test-domain", 
[][][2]string{{{"hello", "world"}}}, 1) response, err := service.ShouldRateLimit(nil, request) @@ -365,9 +485,9 @@ func TestUnlimited(test *testing.T) { request := common.NewRateLimitRequest( "some-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}, {{"baz", "qux"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false, false), nil, - config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true)} + config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false)} t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[1]).Return(limits[1]) t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[2]).Return(limits[2]) From 21fc4d37b8ca3ce5ceaa3c46705ef7aebaef5907 Mon Sep 17 00:00:00 2001 From: Yuki Sawa Date: Tue, 5 Oct 2021 09:12:42 -0700 Subject: [PATCH 026/181] CI: Add pre-commit linters, autoformatters (#299) * Add pre-commit linters, formatters Signed-off-by: Yuki Sawa * add descriptive name for fail msg step Signed-off-by: Yuki Sawa * shorten precommit makefile cmd Signed-off-by: Yuki Sawa * make fix_format Signed-off-by: Yuki Sawa * autoformat all lint errs Signed-off-by: Yuki Sawa * backtick quote to avoid formatter Signed-off-by: Yuki Sawa * add shfmt for bash linting Signed-off-by: Yuki Sawa --- .github/workflows/main.yaml | 20 +++ .github/workflows/pullrequest.yaml | 30 +++- .github/workflows/release.yaml | 4 +- .github/workflows/stale.yml | 64 ++++----- .pre-commit-config.yaml | 31 +++++ .prettierrc.yaml | 1 + CONTRIBUTING.md | 21 ++- Makefile | 12 +- OWNERS.md | 4 +- README.md | 131 ++++++++++-------- docker-compose-example.yml | 12 +- docker-compose.yml | 3 +- examples/prom-statsd-exporter/conf.yaml | 28 ++-- .../docker-compose-integration-test.yml | 12 +- integration-test/run-all.sh | 19 ++- integration-test/scripts/simple-get.sh | 6 +- integration-test/scripts/trigger-ratelimit.sh | 28 ++-- .../scripts/trigger-shadow-mode-key.sh | 24 ++-- requirements-dev.txt | 1 + script/docs_check_format | 8 +- script/docs_fix_format | 2 +- script/lint | 5 +- src/config/config.go | 3 +- src/config/config_impl.go | 4 +- src/config_check_cmd/main.go | 8 +- src/limiter/base_limiter.go | 9 +- src/limiter/cache.go | 3 +- src/limiter/cache_key.go | 4 +- src/memcached/cache_impl.go | 3 +- src/metrics/metrics.go | 3 +- src/redis/cache_impl.go | 1 + src/redis/fixed_cache_impl.go | 5 +- src/server/server_impl.go | 19 ++- src/service/ratelimit.go | 9 +- src/service_cmd/runner/runner.go | 3 +- src/srv/srv.go | 2 - src/stats/manager_impl.go | 3 +- test/common/common.go | 1 - test/config/config_test.go | 6 +- test/config/duplicate_key.yaml | 2 +- test/config/misspelled_key.yaml | 2 +- test/config/misspelled_key2.yaml | 2 +- test/config/non_map_list.yaml | 2 +- test/integration/integration_test.go | 35 +++-- test/limiter/base_limiter_test.go | 9 +- test/memcached/cache_impl_test.go | 53 ++++--- .../memcached/stats_collecting_client_test.go | 5 +- test/metrics/metrics_test.go | 3 +- test/mocks/config/config.go | 6 +- test/mocks/limiter/limiter.go | 6 +- test/mocks/memcached/client.go | 3 +- test/mocks/redis/redis.go | 6 +- test/mocks/rls/rls.go | 3 +- test/mocks/runtime/loader/loader.go | 3 +- 
test/mocks/runtime/snapshot/snapshot.go | 5 +- test/mocks/stats/manager.go | 3 +- test/mocks/utils/utils.go | 3 +- test/redis/bench_test.go | 6 +- test/redis/driver_impl_test.go | 3 +- test/redis/fixed_cache_impl_test.go | 66 ++++++--- test/server/health_test.go | 4 +- test/server/server_impl_test.go | 10 +- test/service/ratelimit_test.go | 96 ++++++++----- test/srv/srv_test.go | 3 +- 64 files changed, 552 insertions(+), 339 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100644 .prettierrc.yaml create mode 100644 requirements-dev.txt diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 54617022..ae305019 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -12,6 +12,7 @@ jobs: - uses: actions/checkout@v2 - name: check format run: make check_format + build: runs-on: ubuntu-latest steps: @@ -25,3 +26,22 @@ jobs: env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + precommits: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-python@v2 + with: + python-version: "3.9" + + - uses: actions/setup-go@v2 + with: + go-version: "1.16" + + - name: run pre-commits + run: | + make precommit_install + pre-commit run -a diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index da03fd13..164a61a4 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -6,10 +6,13 @@ on: jobs: check: runs-on: ubuntu-latest + steps: - uses: actions/checkout@v2 + - name: check format run: make check_format + build: runs-on: ubuntu-latest @@ -17,5 +20,30 @@ jobs: - uses: actions/checkout@v2 - name: build and test + run: make docker_tests + + precommits: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - uses: actions/setup-python@v2 + with: + python-version: "3.9" + + - uses: actions/setup-go@v2 + with: + go-version: "1.16" + + - name: run pre-commits + run: | + make precommit_install + pre-commit run -a + + # If previous stage fails, print resolution steps + - if: ${{ failure() }} + name: Read for resolution steps run: | - make docker_tests + echo "Pre-commits failed! Run 'make precommit_install' then 'pre-commit run -a' to fix."
+ exit 1 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index e2349f99..bcfc82b1 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -3,7 +3,7 @@ name: Build and push :release image on: push: tags: - - 'v*' + - "v*" jobs: check: @@ -23,4 +23,4 @@ jobs: make docker_push env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} \ No newline at end of file + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 3c332fad..72a49005 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,7 +1,7 @@ on: workflow_dispatch: schedule: - - cron: '0 */4 * * *' + - cron: "0 */4 * * *" jobs: prune_stale: @@ -9,34 +9,34 @@ jobs: runs-on: ubuntu-latest steps: - - name: Prune Stale - uses: actions/stale@v3.0.14 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - # Different amounts of days for issues/PRs are not currently supported but there is a PR - # open for it: https://github.com/actions/stale/issues/214 - days-before-stale: 30 - days-before-close: 7 - stale-issue-message: > - This issue has been automatically marked as stale because it has not had activity in the - last 30 days. It will be closed in the next 7 days unless it is tagged "help wanted" or "no stalebot" or other activity - occurs. Thank you for your contributions. - close-issue-message: > - This issue has been automatically closed because it has not had activity in the - last 37 days. If this issue is still valid, please ping a maintainer and ask them to label it as "help wanted" or "no stalebot". - Thank you for your contributions. - stale-pr-message: > - This pull request has been automatically marked as stale because it has not had - activity in the last 30 days. It will be closed in 7 days if no further activity occurs. Please - feel free to give a status update now, ping for review, or re-open when it's ready. - Thank you for your contributions! - close-pr-message: > - This pull request has been automatically closed because it has not had - activity in the last 37 days. Please feel free to give a status update now, ping for review, or re-open when it's ready. - Thank you for your contributions! - stale-issue-label: 'stale' - exempt-issue-labels: 'no stalebot,help wanted' - stale-pr-label: 'stale' - exempt-pr-labels: 'no stalebot' - operations-per-run: 500 - ascending: true \ No newline at end of file + - name: Prune Stale + uses: actions/stale@v3.0.14 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + # Different amounts of days for issues/PRs are not currently supported but there is a PR + # open for it: https://github.com/actions/stale/issues/214 + days-before-stale: 30 + days-before-close: 7 + stale-issue-message: > + This issue has been automatically marked as stale because it has not had activity in the + last 30 days. It will be closed in the next 7 days unless it is tagged "help wanted" or "no stalebot" or other activity + occurs. Thank you for your contributions. + close-issue-message: > + This issue has been automatically closed because it has not had activity in the + last 37 days. If this issue is still valid, please ping a maintainer and ask them to label it as "help wanted" or "no stalebot". + Thank you for your contributions. + stale-pr-message: > + This pull request has been automatically marked as stale because it has not had + activity in the last 30 days. It will be closed in 7 days if no further activity occurs. 
Please + feel free to give a status update now, ping for review, or re-open when it's ready. + Thank you for your contributions! + close-pr-message: > + This pull request has been automatically closed because it has not had + activity in the last 37 days. Please feel free to give a status update now, ping for review, or re-open when it's ready. + Thank you for your contributions! + stale-issue-label: "stale" + exempt-issue-labels: "no stalebot,help wanted" + stale-pr-label: "stale" + exempt-pr-labels: "no stalebot" + operations-per-run: 500 + ascending: true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..f23dc042 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,31 @@ +default_language_version: + python: python3 +repos: + - repo: https://github.com/tekwizely/pre-commit-golang + rev: v1.0.0-beta.4 + hooks: + - id: go-imports + args: ["-w", "-local", "github.com/envoyproxy/ratelimit"] + - id: go-fumpt + args: ["-w"] + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v2.4.1" + hooks: + - id: prettier + exclude: "test/config/bad_yaml.yaml" + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-json + - id: check-merge-conflict + - id: end-of-file-fixer + - id: trailing-whitespace + + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 2.1.5 + hooks: + - id: shfmt diff --git a/.prettierrc.yaml b/.prettierrc.yaml new file mode 100644 index 00000000..abeaf738 --- /dev/null +++ b/.prettierrc.yaml @@ -0,0 +1 @@ +singleQuote: false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5d641c98..9d43e10f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,15 +2,26 @@ We welcome contributions from the community. Here are some guidelines. # Coding style -* Ratelimit uses golang's `fmt` too. +- Ratelimit uses [gofumpt](https://github.com/mvdan/gofumpt) and [goimports](https://pkg.go.dev/golang.org/x/tools/cmd/goimports) for Go styling. # Submitting a PR -* Fork the repo and create your PR. -* Tests will automatically run for you. -* When all of the tests are passing, tag @envoyproxy/ratelimit-maintainers and +- Fork the repo. +- Before committing any code, install the pre-commits by: + +```bash +make precommit_install +# Example usage if you want to run it manually +pre-commit run # Run against staged changes +pre-commit run -a # Run against all files +``` + +- Pre-commits will automatically format your code at commit time. +- Create your PR. +- Tests will automatically run for you. +- When all of the tests are passing, tag @envoyproxy/ratelimit-maintainers and we will review it and merge. -- Party time.
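Much of the Go churn elsewhere in this series (for example the import reshuffle in `test/metrics/metrics_test.go`) is simply these hooks at work. Below is a minimal sketch of the import layout that `goimports -w -local github.com/envoyproxy/ratelimit` produces; the file name, package name, and the trailing `var` block are illustrative only, not taken from the repo:

```go
// Illustrative only: demonstrates the grouping the go-imports hook enforces.
package example

import (
	// Standard-library imports are grouped first.
	"context"
	"testing"
	"time"

	// Module imports follow; the -local flag tells goimports to treat
	// github.com/envoyproxy/ratelimit paths as project-local when grouping.
	"github.com/envoyproxy/ratelimit/src/metrics"
	stats "github.com/lyft/gostats"
)

// Reference each import so this sketch compiles as-is.
var (
	_ = context.Background
	_ = testing.Short
	_ = time.Now
	_ = metrics.NewServerReporter
	_ = stats.NewStore
)
```

Running `pre-commit run -a` applies the same layout across the whole tree, which is where the one-time formatting diffs in this patch come from.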
# DCO: Sign your work diff --git a/Makefile b/Makefile index 739d8757..55fd1536 100644 --- a/Makefile +++ b/Makefile @@ -114,8 +114,14 @@ docker_image: docker_tests docker_push: docker_image docker push $(IMAGE):$(VERSION) -.PHONY: integration-tests -integration-tests: +.PHONY: integration_tests +integration_tests: docker-compose --project-dir $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester -# docker-compose --project-dir $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester +.PHONY: precommit_install +precommit_install: + python3 -m pip install -r requirements-dev.txt + go install mvdan.cc/gofumpt@v0.1.1 + go install mvdan.cc/sh/v3/cmd/shfmt@latest + go install golang.org/x/tools/cmd/goimports@v0.1.7 + pre-commit install diff --git a/OWNERS.md b/OWNERS.md index ac3a5660..a371de23 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -1,2 +1,2 @@ -* Matt Klein ([mattklein123](https://github.com/mattklein123)) (mklein@lyft.com) -* Yuki Sawa ([ysawa0](https://github.com/ysawa0)) (yukisawa@gmail.com) +- Matt Klein ([mattklein123](https://github.com/mattklein123)) (mklein@lyft.com) +- Yuki Sawa ([ysawa0](https://github.com/ysawa0)) (yukisawa@gmail.com) diff --git a/README.md b/README.md index 98861a14..33e367ba 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,5 @@ -**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* - [Overview](#overview) - [Docker Image](#docker-image) @@ -57,6 +56,7 @@ reads the configuration from disk via [runtime](https://github.com/lyft/goruntim decision is then returned to the caller. # Docker Image + For every main commit, an image is pushed to [Dockerhub](https://hub.docker.com/r/envoyproxy/ratelimit/tags?page=1&ordering=last_updated). There is currently no versioning (post v1.4.0) and tags are based on commit sha. # Supported Envoy APIs @@ -69,26 +69,27 @@ Support for [v2 rls proto](https://github.com/envoyproxy/data-plane-api/blob/mas 1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. Ratelimit has been in production use at Lyft for over 2 years. 2. `v1.1.0` introduces the data-plane-api proto and initiates the deprecation of the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). 3. `e91321b` [commit](https://github.com/envoyproxy/ratelimit/commit/e91321b10f1ad7691d0348e880bd75d0fca05758) deleted support for the legacy [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). -The current version of ratelimit protocol is changed to [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) -while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is still supported -as a legacy protocol. + The current version of ratelimit protocol is changed to [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) + while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is still supported + as a legacy protocol. 4. `4bb32826` deleted support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) # Building and Testing -* Install Redis-server. 
-* Make sure go is setup correctly and checkout rate limit service into your go path. More information about installing -go [here](https://golang.org/doc/install). -* In order to run the integration tests using a local Redis server please run two Redis-server instances: one on port `6379` and another on port `6380` +- Install Redis-server. +- Make sure go is set up correctly and check out the rate limit service into your go path. More information about installing + go [here](https://golang.org/doc/install). +- In order to run the integration tests using a local Redis server please run two Redis-server instances: one on port `6379` and another on port `6380` ```bash redis-server --port 6379 & redis-server --port 6380 & ``` -* To setup for the first time (only done once): +- To set up for the first time (only done once): ```bash make bootstrap ``` -* To compile: +- To compile: + ```bash make compile ``` @@ -99,11 +100,11 @@ go [here](https://golang.org/doc/install). GOOS=linux make compile ``` -* To compile and run tests: +- To compile and run tests: ```bash make tests ``` -* To run the server locally using some sensible default settings you can do this (this will setup the server to read the configuration files from the path you specify): +- To run the server locally using some sensible default settings you can do this (this will set up the server to read the configuration files from the path you specify): ```bash USE_STATSD=false LOG_LEVEL=debug REDIS_SOCKET_TYPE=tcp REDIS_URL=localhost:6379 RUNTIME_ROOT=/home/user/src/runtime/data RUNTIME_SUBDIRECTORY=ratelimit ``` @@ -124,11 +125,15 @@ the docker-compose.yml file to run a second redis container, and change the envi as explained in the [two redis instances](#two-redis-instances) section. ## Full test environment + To run a fully configured environment to demo Envoy based rate limiting, run: + ```bash docker-compose -f docker-compose-example.yml up --build --remove-orphans ``` + This will run ratelimit, redis, prom-statsd-exporter and two Envoy containers such that you can demo rate limiting by hitting the below endpoints. + ```bash curl localhost:8888/test curl localhost:8888/header -H "foo: foo" # Header based curl localhost:8888/twoheader -H "foo: foo" -H "bar: banned" # Ban a particular header value curl localhost:8888/twoheader -H "foo: foo" -H "baz: shady" # This will never be ratelimited since "baz" with value "shady" is in shadow_mode curl localhost:8888/twoheader -H "foo: foo" -H "baz: not-so-shady" # This is subject to rate-limiting because it's not in shadow_mode ``` + Edit `examples/ratelimit/config/example.yaml` to test different rate limit configs. Hot reloading is enabled. The descriptors in `example.yaml` and the actions in `examples/envoy/proxy.yaml` should give you a good idea on how to configure rate limits. To see the metrics in the example + ```bash # The metrics for the shadow_mode keys curl http://localhost:9102/metrics | grep -i shadow @@ -157,7 +164,7 @@ The test suite will spin up a docker-compose environment from `integration-test/ If the test suite fails it will exit with code 1. ```bash -make integration-tests +make integration_tests ``` # Configuration @@ -168,14 +175,14 @@ The rate limit configuration file format is YAML (mainly so that comments are su ### Definitions -* **Domain:** A domain is a container for a set of rate limits. All domains known to the Ratelimit service must be -globally unique. They serve as a way for different teams/projects to have rate limit configurations that don't conflict.
-* **Descriptor:** A descriptor is a list of key/value pairs owned by a domain that the Ratelimit service uses to -select the correct rate limit to use when limiting. Descriptors are case-sensitive. Examples of descriptors are: - * ("database", "users") - * ("message_type", "marketing"),("to_number","2061234567") - * ("to_cluster", "service_a") - * ("to_cluster", "service_a"),("from_cluster", "service_b") +- **Domain:** A domain is a container for a set of rate limits. All domains known to the Ratelimit service must be + globally unique. They serve as a way for different teams/projects to have rate limit configurations that don't conflict. +- **Descriptor:** A descriptor is a list of key/value pairs owned by a domain that the Ratelimit service uses to + select the correct rate limit to use when limiting. Descriptors are case-sensitive. Examples of descriptors are: + - ("database", "users") + - ("message_type", "marketing"),("to_number","2061234567") + - ("to_cluster", "service_a") + - ("to_cluster", "service_a"),("from_cluster", "service_b") ### Descriptor list definition @@ -212,6 +219,7 @@ Currently the service supports per second, minute, hour, and day limits. More ty future based on user demand. ### ShadowMode + A shadow_mode key in a rule indicates that whatever the outcome of the evaluation of the rule, the end-result will always be "OK". When a block is in ShadowMode all functions of the rate limiting service are executed as normal, with cache-lookup and statistics @@ -272,7 +280,7 @@ descriptors: In the preceding example, the domain is "messaging" and we setup two different scenarios that illustrate more complex functionality. First, we want to limit on marketing messages to a specific number. To enable this, we make -use of *nested descriptor lists.* The top level descriptor is ("message_type", "marketing"). However this descriptor +use of _nested descriptor lists._ The top level descriptor is ("message_type", "marketing"). However this descriptor does not have a limit assigned so it's just a placeholder. Contained within this entry we have another descriptor list that includes an entry with key "to_number". However, notice that no value is provided. This means that the service will match against any value supplied for "to_number" and generate a unique limit. Thus, ("message_type", "marketing"), @@ -283,7 +291,7 @@ The configuration also sets up another rule without a value. This one creates an any particular number during a 1 day period. Thus, ("to_number", "2061111111") and ("to_number", "2062222222") both get 100 requests per day. -When calling the rate limit service, the client can specify *multiple descriptors* to limit on in a single call. This +When calling the rate limit service, the client can specify _multiple descriptors_ to limit on in a single call. This limits round trips and allows limiting on aggregate rule definitions. For example, using the preceding configuration, the client could send this complete request (in pseudo IDL): @@ -294,7 +302,7 @@ RateLimitRequest: descriptor: ("to_number", "2061111111") ``` -And the service will rate limit against *all* matching rules and return an aggregate result; a logical OR of all +And the service will rate limit against _all_ matching rules and return an aggregate result; a logical OR of all the individual rate limit decisions. #### Example 3 @@ -318,11 +326,12 @@ descriptors: ``` In the preceding example, we setup a generic rate limit for individual IP addresses. 
The architecture's edge proxy can -be configured to make a rate limit service call with the descriptor ("remote_address", "50.0.0.1") for example. This IP would +be configured to make a rate limit service call with the descriptor `("remote_address", "50.0.0.1")` for example. This IP would get 10 requests per second as would any other IP. However, the configuration also contains a second configuration that explicitly defines a -value along with the same key. If the descriptor ("remote_address", "50.0.0.5") is received, the service will -*attempt the most specific match possible*. This means +value along with the same key. +If the descriptor `("remote_address", "50.0.0.5")` is received, the service +will _attempt the most specific match possible_. This means the most specific descriptor at the same level as your request. Thus, key/value is always attempted as a match before just key. #### Example 4 @@ -387,9 +396,9 @@ This can be useful for collecting statistics, or if one wants to define a descri The return value for unlimited descriptors will be an OK status code with the LimitRemaining field set to MaxUint32 value. - ### Example 6 +### Example 6 - A rule using shadow_mode is useful for soft-launching rate limiting. In this example +A rule using shadow_mode is useful for soft-launching rate limiting. In this example ``` RateLimitRequest: @@ -419,7 +428,6 @@ descriptors: unit: second ``` - ## Loading Configuration The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors @@ -437,6 +445,7 @@ RUNTIME_IGNOREDOTFILES default:"false" **Configuration files are loaded from RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/\*.yaml** There are two methods for triggering a configuration reload: + 1. Symlink RUNTIME_ROOT to a different directory. 2. Update the contents inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/` directly. @@ -467,6 +476,7 @@ LOG_FORMAT=json ``` Output example: + ``` {"@message":"loading domain: messaging","@timestamp":"2020-09-10T17:22:44.926010192Z","level":"debug"} {"@message":"loading descriptor: key=messaging.message_type_marketing","@timestamp":"2020-09-10T17:22:44.926019315Z","level":"debug"} @@ -479,10 +489,12 @@ Output example: ``` ## GRPC Keepalive + Client-side GRPC DNS re-resolution in scenarios with auto scaling enabled might not work as expected and the current workaround is to [configure connection keepalive](https://github.com/grpc/grpc/issues/12295#issuecomment-382794204) on server-side. The behavior can be fixed by configuring the following env variables for the ratelimit server: -* `GRPC_MAX_CONNECTION_AGE`: a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. -* `GRPC_MAX_CONNECTION_AGE_GRACE`: an additive period after MaxConnectionAge after which the connection will be forcibly closed. + +- `GRPC_MAX_CONNECTION_AGE`: a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. +- `GRPC_MAX_CONNECTION_AGE_GRACE`: an additive period after MaxConnectionAge after which the connection will be forcibly closed. 
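For a concrete picture, here is a minimal sketch of how these two variables map onto grpc-go's server keepalive options. It assumes the stock `google.golang.org/grpc/keepalive` API; the parsing helper and wiring below are illustrative, not the service's actual startup code:

```go
package main

import (
	"os"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// durationFromEnv is an illustrative helper: it parses a Go duration string
// (e.g. "30m") from the environment, falling back to zero when unset or invalid.
func durationFromEnv(name string) time.Duration {
	d, err := time.ParseDuration(os.Getenv(name))
	if err != nil {
		return 0
	}
	return d
}

func main() {
	// grpc-go treats zero values for these fields as "infinite", so the
	// connection-age limits stay disabled unless the variables are set.
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		// grpc-go itself applies the +/-10% jitter to MaxConnectionAge.
		MaxConnectionAge:      durationFromEnv("GRPC_MAX_CONNECTION_AGE"),
		MaxConnectionAgeGrace: durationFromEnv("GRPC_MAX_CONNECTION_AGE_GRACE"),
	}))
	_ = srv // register services and call Serve as usual
}
```

Setting `GRPC_MAX_CONNECTION_AGE=30m`, for example, would have the server send a GoAway roughly every half hour, prompting well-behaved clients to reconnect and re-resolve DNS.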
# Request Fields For information on the fields of a Ratelimit gRPC request please read the information on the RateLimitRequest message type in the Ratelimit [proto file.](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/ratelimit/v3/rls.proto) # GRPC Client + The [gRPC client](https://github.com/envoyproxy/ratelimit/blob/master/src/client_cmd/main.go) will interact with ratelimit server and tell you if the requests are over limit. + ## Commandline flags + - `-dial_string`: used to specify the address of ratelimit server. It defaults to `localhost:8081`. - `-domain`: used to specify the domain. - `-descriptors`: used to specify one descriptor. You can pass multiple descriptors like following: + ``` go run main.go -domain test \ -descriptors name=foo,age=14 -descriptors name=bar,age=18 ``` @@ -505,13 +521,14 @@ # Global ShadowMode There is a global shadow-mode which can make it easier to introduce rate limiting into an existing service landscape. It will override whatever result is returned by the regular rate limiting process. ## Configuration + The global shadow mode is configured with an environment variable. Setting the environment variable `SHADOW_MODE` to `true` will enable the feature. ## Statistics -There is an additional service-level statistics generated that will increment whenever the global shadow mode has overridden a rate limiting result. +There is an additional service-level statistic generated that will increment whenever the global shadow mode has overridden a rate limiting result. # Statistics @@ -526,17 +543,20 @@ ratelimit.service.rate_limit.DOMAIN.KEY_VALUE.STAT ``` DOMAIN: + - As specified in the domain value in the YAML runtime file KEY_VALUE: + - A combination of the key value - Nested descriptors would be suffixed in the stats path STAT: + - near_limit: Number of rule hits over the NearLimit ratio threshold (currently 80%) but under the threshold rate. - over_limit: Number of rule hits exceeding the threshold rate - total_hits: Number of rule hits in total - shadow_mode: Number of rule hits where shadow_mode would trigger and override the over_limit result To use a custom near_limit ratio threshold, you can specify it with the `NEAR_LIMIT_RATIO` environment variable. It defaults to `0.8` (0-1 scale). These are examples of generated stats for some configured rate limit rules from the above examples: ``` @@ -559,28 +579,29 @@ ratelimit.service.rate_limit.messaging.auth-service.over_limit.shadow_mode: 1 # HTTP Port The ratelimit service listens to HTTP 1.1 (by default on port 8080) with two endpoints: + 1. /healthcheck → return a 200 if this service is healthy 1. /json → HTTP 1.1 endpoint for interacting with ratelimit service ## /json endpoint Takes an HTTP POST with a JSON body of the form e.g.
+ ```json { "domain": "dummy", "descriptors": [ - {"entries": [ - {"key": "one_per_day", - "value": "something"} - ]} + { "entries": [{ "key": "one_per_day", "value": "something" }] } ] } ``` + The service will return an HTTP 200 if this request is allowed (if no ratelimits exceeded) or 429 if one or more ratelimits were exceeded. The response is a RateLimitResponse encoded with [proto3-to-json mapping](https://developers.google.com/protocol-buffers/docs/proto3#json): + ```json { "overallCode": "OVER_LIMIT", @@ -656,10 +677,10 @@ By default, for each request, ratelimit will pick up a connection from pool, wri For high throughput scenarios, ratelimit also supports [implicit pipelining](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L238). It can be configured using the following environment variables: -1. `REDIS_PIPELINE_WINDOW` & `REDIS_PERSECOND_PIPELINE_WINDOW`: sets the duration after which internal pipelines will be flushed. -If window is zero then implicit pipelining will be disabled. +1. `REDIS_PIPELINE_WINDOW` & `REDIS_PERSECOND_PIPELINE_WINDOW`: sets the duration after which internal pipelines will be flushed. + If window is zero then implicit pipelining will be disabled. 1. `REDIS_PIPELINE_LIMIT` & `REDIS_PERSECOND_PIPELINE_LIMIT`: sets maximum number of commands that can be pipelined before flushing. -If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. + If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. `implicit pipelining` is disabled by default. To enable it, you can use default values [used by radix](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L278) and tune for the optimal value. @@ -713,9 +734,11 @@ When using multiple memcache nodes in `MEMCACHE_HOST_PORT=`, one should provide to all ratelimiter instances to ensure that a particular cache key is always hashed to the same memcache node. # Custom headers + Ratelimit service can be configured to return custom headers with the ratelimit information. It will populate the response_headers_to_add as part of the [RateLimitResponse](https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/ratelimit/v3/rls.proto#service-ratelimit-v3-ratelimitresponse). The following environment variables control the custom response feature: + 1. `LIMIT_RESPONSE_HEADERS_ENABLED` - Enables the custom response headers 1. `LIMIT_LIMIT_HEADER` - The default value is "RateLimit-Limit", setting the environment variable will specify an alternative header name 1. `LIMIT_REMAINING_HEADER` - The default value is "RateLimit-Remaining", setting the environment variable will specify an alternative header name
Please add `[ratelimit]` to the email subject. -* [Slack](https://envoyproxy.slack.com/): Slack, to get invited go [here](http://envoyslack.cncf.io). +- [Slack](https://envoyproxy.slack.com/): Slack, to get invited go [here](http://envoyslack.cncf.io). We have the IRC/XMPP gateways enabled if you prefer either of those. Once an account is created, connection instructions for IRC/XMPP can be found [here](https://envoyproxy.slack.com/account/gateways). The `#ratelimit-users` channel is used for discussions about the ratelimit service. diff --git a/docker-compose-example.yml b/docker-compose-example.yml index 9739feaa..ba4cc69e 100644 --- a/docker-compose-example.yml +++ b/docker-compose-example.yml @@ -65,11 +65,11 @@ services: networks: - ratelimit-network expose: - - "8888" - - "8001" + - "8888" + - "8001" ports: - - "8888:8888" - - "8001:8001" + - "8888:8888" + - "8001:8001" envoy-mock: image: envoyproxy/envoy-dev:latest @@ -86,9 +86,9 @@ services: networks: - ratelimit-network expose: - - "9999" + - "9999" ports: - - "9999:9999" + - "9999:9999" networks: ratelimit-network: diff --git a/docker-compose.yml b/docker-compose.yml index 88d3a86e..8271245e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,5 @@ version: "3" services: - redis: image: redis:alpine expose: @@ -39,7 +38,7 @@ services: ratelimit: image: alpine:3.6 command: > - sh -c "until test -f /usr/local/bin/ratelimit; do sleep 5; done; /usr/local/bin/ratelimit" + sh -c "until test -f /usr/local/bin/ratelimit; do sleep 5; done; /usr/local/bin/ratelimit" ports: - 8080:8080 - 8081:8081 diff --git a/examples/prom-statsd-exporter/conf.yaml b/examples/prom-statsd-exporter/conf.yaml index f6649c2f..31f16dd6 100644 --- a/examples/prom-statsd-exporter/conf.yaml +++ b/examples/prom-statsd-exporter/conf.yaml @@ -1,59 +1,51 @@ mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action. 
-  - match:
-      "ratelimit.service.rate_limit.*.*.near_limit"
+  - match: "ratelimit.service.rate_limit.*.*.near_limit"
     name: "ratelimit_service_rate_limit_near_limit"
     timer_type: "histogram"
     labels:
       domain: "$1"
       key1: "$2"

-  - match:
-      "ratelimit.service.rate_limit.*.*.over_limit"
+  - match: "ratelimit.service.rate_limit.*.*.over_limit"
     name: "ratelimit_service_rate_limit_over_limit"
     timer_type: "histogram"
     labels:
       domain: "$1"
       key1: "$2"

-  - match:
-      "ratelimit.service.rate_limit.*.*.total_hits"
+  - match: "ratelimit.service.rate_limit.*.*.total_hits"
     name: "ratelimit_service_rate_limit_total_hits"
     timer_type: "histogram"
     labels:
       domain: "$1"
       key1: "$2"

-  - match:
-      "ratelimit.service.rate_limit.*.*.within_limit"
+  - match: "ratelimit.service.rate_limit.*.*.within_limit"
     name: "ratelimit_service_rate_limit_within_limit"
     timer_type: "histogram"
     labels:
       domain: "$1"
       key1: "$2"

-  - match:
-      "ratelimit.service.rate_limit.*.*.*.near_limit"
+  - match: "ratelimit.service.rate_limit.*.*.*.near_limit"
     name: "ratelimit_service_rate_limit_near_limit"
     timer_type: "histogram"
     labels:
       domain: "$1"
       key1: "$2"
       key2: "$3"

-  - match:
-      "ratelimit.service.rate_limit.*.*.*.over_limit"
+  - match: "ratelimit.service.rate_limit.*.*.*.over_limit"
     name: "ratelimit_service_rate_limit_over_limit"
     timer_type: "histogram"
     labels:
       domain: "$1"
       key1: "$2"
       key2: "$3"

-  - match:
-      "ratelimit.service.rate_limit.*.*.*.total_hits"
+  - match: "ratelimit.service.rate_limit.*.*.*.total_hits"
     name: "ratelimit_service_rate_limit_total_hits"
     timer_type: "histogram"
     labels:
       domain: "$1"
       key1: "$2"
       key2: "$3"

-  - match:
-      "ratelimit.service.rate_limit.*.*.*.within_limit"
+  - match: "ratelimit.service.rate_limit.*.*.*.within_limit"
     name: "ratelimit_service_rate_limit_within_limit"
     timer_type: "histogram"
     labels:
@@ -86,8 +78,7 @@ mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action.
     name: "ratelimit_service_config_load_error"
     match_metric_type: counter

-  - match:
-      "ratelimit.service.rate_limit.*.*.*.shadow_mode"
+  - match: "ratelimit.service.rate_limit.*.*.*.shadow_mode"
     name: "ratelimit_service_rate_limit_shadow_mode"
     timer_type: "histogram"
     labels:
@@ -95,7 +86,6 @@ mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action.
     key1: "$2"
     key2: "$3"

-
 # Enable below in production once you have the metrics you need
 # - match: "."
 #   match_type: "regex"
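To make the wildcard rules above concrete: under these mappings, a statsd counter named `ratelimit.service.rate_limit.mongo_cps.database_users.total_hits` (the domain `mongo_cps` and key `database_users` are hypothetical names, chosen only for illustration) would surface in Prometheus as `ratelimit_service_rate_limit_total_hits{domain="mongo_cps", key1="database_users"}`, while the three-wildcard variants additionally capture a `key2` label for nested descriptor keys.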
diff --git a/integration-test/docker-compose-integration-test.yml b/integration-test/docker-compose-integration-test.yml
index 7cfd2eb9..d37264cb 100644
--- a/integration-test/docker-compose-integration-test.yml
+++ b/integration-test/docker-compose-integration-test.yml
@@ -69,11 +69,11 @@ services:
     networks:
       - ratelimit-network
     expose:
-      - "8888"
-      - "8001"
+      - "8888"
+      - "8001"
     ports:
-      - "8888:8888"
-      - "8001:8001"
+      - "8888:8888"
+      - "8001:8001"

   envoy-mock:
     image: envoyproxy/envoy-dev:latest
@@ -90,9 +90,9 @@ services:
     networks:
       - ratelimit-network
     expose:
-      - "9999"
+      - "9999"
     ports:
-      - "9999:9999"
+      - "9999:9999"

   tester:
     build:
diff --git a/integration-test/run-all.sh b/integration-test/run-all.sh
index 58987603..fe8c94ee 100755
--- a/integration-test/run-all.sh
+++ b/integration-test/run-all.sh
@@ -3,13 +3,12 @@
 echo "Running tests"

 FILES=/test/scripts/*
-for f in $FILES
-do
-  echo "Processing $f file..."
-  # take action on each file. $f store current file name
-  $f
-  if [ $? -ne 0 ] ; then
-    echo "Failed file $f"
-    exit 1
-  fi
-done
\ No newline at end of file
+for f in $FILES; do
+  echo "Processing $f file..."
+  # take action on each file; $f stores the current file name
+  $f
+  if [ $? -ne 0 ]; then
+    echo "Failed file $f"
+    exit 1
+  fi
+done
diff --git a/integration-test/scripts/simple-get.sh b/integration-test/scripts/simple-get.sh
index 54233885..74466c50 100755
--- a/integration-test/scripts/simple-get.sh
+++ b/integration-test/scripts/simple-get.sh
@@ -3,6 +3,6 @@
 # Just happy path
 curl -s -f -H "foo: test" -H "baz: shady" http://envoy-proxy:8888/twoheader

-if [ $? -ne 0 ] ; then
-  exit 1
-fi
\ No newline at end of file
+if [ $? -ne 0 ]; then
+  exit 1
+fi
diff --git a/integration-test/scripts/trigger-ratelimit.sh b/integration-test/scripts/trigger-ratelimit.sh
index 49b36ada..57f42080 100755
--- a/integration-test/scripts/trigger-ratelimit.sh
+++ b/integration-test/scripts/trigger-ratelimit.sh
@@ -5,24 +5,24 @@
 # Has rate limit quota 3 req / min
 #

-response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader`
-response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader`
-response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader`
+response=$(curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader)
+response=$(curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader)
+response=$(curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader)

-if [ $? -ne 0 ] ; then
-  echo "Rate limit should not trigger yet"
-  exit 1
+if [ $? -ne 0 ]; then
+  echo "Rate limit should not trigger yet"
+  exit 1
 fi

-response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader`
+response=$(curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader)

-if [ $? -eq 0 ] ; then
-  echo "Rate limiting should fail the request"
-  exit 1
+if [ $? -eq 0 ]; then
+  echo "Rate limiting should fail the request"
+  exit 1
 fi

-response=`curl -i -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader | grep "Too Many Requests"`
-if [ $? -ne 0 ] ; then
-  echo "This should trigger a ratelimit"
-  exit 1
+response=$(curl -i -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader | grep "Too Many Requests")
+if [ $? -ne 0 ]; then
+  echo "This should trigger a ratelimit"
+  exit 1
 fi
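One caveat worth noting about the script above: `$?` only reflects the most recent command, so after a run of `response=$(curl ...)` assignments, only the final curl's exit status is actually checked. A hypothetical stricter variant, shown purely for illustration and not part of this change, could fail fast on every call:

```bash
#!/bin/bash
# Hypothetical helper: check every curl call instead of relying on $?
# after the last assignment. URL and headers mirror the script above.
expect_ok() {
  if ! curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader > /dev/null; then
    echo "Rate limit should not trigger yet"
    exit 1
  fi
}

expect_ok
expect_ok
expect_ok
```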
diff --git a/integration-test/scripts/trigger-shadow-mode-key.sh b/integration-test/scripts/trigger-shadow-mode-key.sh
index b72ec7d5..3204284e 100755
--- a/integration-test/scripts/trigger-shadow-mode-key.sh
+++ b/integration-test/scripts/trigger-shadow-mode-key.sh
@@ -6,20 +6,20 @@
 # shadow_mode is true
 #

-response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader`
-response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader`
-response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader`
-response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader`
-response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader`
+response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader)
+response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader)
+response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader)
+response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader)
+response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader)

-if [ $? -ne 0 ] ; then
-  echo "Shadow Mode key should not trigger an error, even if we have exceeded the quota"
-  exit 1
+if [ $? -ne 0 ]; then
+  echo "Shadow Mode key should not trigger an error, even if we have exceeded the quota"
+  exit 1
 fi

-remaining=`curl -i -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader | grep x-ratelimit-remaining | cut -d: -f2 | cut -d: -f2 | sed 's/ //g'`
+remaining=$(curl -i -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader | grep x-ratelimit-remaining | cut -d: -f2 | cut -d: -f2 | sed 's/ //g')

-if [ $remaining == "0" ] ; then
-  echo "Remaining should be zero"
-  exit 1
+if [ "$remaining" == "0" ]; then
+  echo "Remaining should be zero"
+  exit 1
 fi
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 00000000..7a82dfc2
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1 @@
+pre-commit>=2.9.3
diff --git a/script/docs_check_format b/script/docs_check_format
index 3fb4faf5..6891f0a4 100755
--- a/script/docs_check_format
+++ b/script/docs_check_format
@@ -1,9 +1,9 @@
 #!/bin/bash

 if ! npm list -g | grep -q doctoc; then
-  npm install -g doctoc
+  npm install -g doctoc
 fi

-TEMP_DIR=`mktemp -d`
+TEMP_DIR=$(mktemp -d)
 cp README.md $TEMP_DIR/
 doctoc $TEMP_DIR
@@ -13,6 +13,6 @@ DIFF_RESULT=$?
 rm -fr $TEMP_DIR

 if [[ $DIFF_RESULT != 0 ]]; then
-  echo "doc formatting is invalid. run make fix_format"
-  exit 1
+  echo "doc formatting is invalid. run make fix_format"
+  exit 1
 fi
diff --git a/script/docs_fix_format b/script/docs_fix_format
index c0c1da3e..e8be01b6 100755
--- a/script/docs_fix_format
+++ b/script/docs_fix_format
@@ -1,6 +1,6 @@
 #!/bin/bash

 if ! npm list -g | grep -q doctoc; then
-  npm install -g doctoc
+  npm install -g doctoc
 fi

 doctoc README.md
diff --git a/script/lint b/script/lint
index 564b8860..353c4047 100755
--- a/script/lint
+++ b/script/lint
@@ -1,6 +1,5 @@
 #!/usr/bin/env bash

-if [ ! -z "$SRCPATH" ]
-then
+if [ ! -z "$SRCPATH" ]; then
   cd $SRCPATH
   export PATH=/go/bin:$PATH
 fi
@@ -9,7 +8,7 @@ PKGS=$(find . -maxdepth 3 -type d | sed s/\.\\/// | grep -vE '.git|\.|script|ven
 LINT_PKGS=$(echo ${PKGS} | grep -v 'tests/generated')

 for pkg in $LINT_PKGS; do
-  golint $pkg | grep -v comment
+  golint $pkg | grep -v comment
 done

 go vet $(glide nv)
diff --git a/src/config/config.go b/src/config/config.go
index d174f100..d6f7e530 100644
--- a/src/config/config.go
+++ b/src/config/config.go
@@ -3,8 +3,9 @@ package config
 import (
 	pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
 	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
-	"github.com/envoyproxy/ratelimit/src/stats"
 	"golang.org/x/net/context"
+
+	"github.com/envoyproxy/ratelimit/src/stats"
 )

 // Errors that may be raised during config parsing.
diff --git a/src/config/config_impl.go b/src/config/config_impl.go
index aca94425..ec3fa4a6 100644
--- a/src/config/config_impl.go
+++ b/src/config/config_impl.go
@@ -6,10 +6,11 @@ import (

 	pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
 	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
-	"github.com/envoyproxy/ratelimit/src/stats"
 	logger "github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
 	"gopkg.in/yaml.v2"
+
+	"github.com/envoyproxy/ratelimit/src/stats"
 )

 type yamlRateLimit struct {
@@ -96,7 +97,6 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit
 // @param descriptors supplies the YAML descriptors to load.
 // @param statsManager that owns the stats.Scope.
func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, statsManager stats.Manager) { - for _, descriptorConfig := range descriptors { if descriptorConfig.Key == "" { panic(newRateLimitConfigError(config, "descriptor has empty key")) diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index e451694c..0c7480e2 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -3,14 +3,16 @@ package main import ( "flag" "fmt" - "github.com/envoyproxy/ratelimit/src/settings" - "github.com/envoyproxy/ratelimit/src/stats" "io/ioutil" "os" "path/filepath" - "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" + gostats "github.com/lyft/gostats" + + "github.com/envoyproxy/ratelimit/src/config" ) func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index cbdde93a..f1a7de22 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -6,11 +6,12 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + logger "github.com/sirupsen/logrus" + "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/utils" - logger "github.com/sirupsen/logrus" ) type BaseRateLimiter struct { @@ -33,8 +34,10 @@ type LimitInfo struct { func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limitAfterIncrease uint32, nearLimitThreshold uint32, overLimitThreshold uint32) *LimitInfo { - return &LimitInfo{limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease, - nearLimitThreshold: nearLimitThreshold, overLimitThreshold: overLimitThreshold} + return &LimitInfo{ + limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease, + nearLimitThreshold: nearLimitThreshold, overLimitThreshold: overLimitThreshold, + } } // Generates cache keys for given rate limit request. Each cache key is represented by a concatenation of diff --git a/src/limiter/cache.go b/src/limiter/cache.go index 5ca07ede..eb914eb1 100644 --- a/src/limiter/cache.go +++ b/src/limiter/cache.go @@ -2,8 +2,9 @@ package limiter import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/config" "golang.org/x/net/context" + + "github.com/envoyproxy/ratelimit/src/config" ) // Interface for interacting with a cache backend for rate limiting. 
diff --git a/src/limiter/cache_key.go b/src/limiter/cache_key.go index 797cc2e1..4aeab204 100644 --- a/src/limiter/cache_key.go +++ b/src/limiter/cache_key.go @@ -7,6 +7,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" ) @@ -74,5 +75,6 @@ func (this *CacheKeyGenerator) GenerateCacheKey( return CacheKey{ Key: b.String(), - PerSecond: isPerSecondLimit(limit.Limit.Unit)} + PerSecond: isPerSecondLimit(limit.Limit.Unit), + } } diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 72bc216f..5ddbe664 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -17,12 +17,13 @@ package memcached import ( "context" - "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "strconv" "sync" "time" + "github.com/envoyproxy/ratelimit/src/stats" + "github.com/coocood/freecache" gostats "github.com/lyft/gostats" diff --git a/src/metrics/metrics.go b/src/metrics/metrics.go index c1eb1109..fffd9dce 100644 --- a/src/metrics/metrics.go +++ b/src/metrics/metrics.go @@ -2,9 +2,10 @@ package metrics import ( "context" + "time" + stats "github.com/lyft/gostats" "google.golang.org/grpc" - "time" ) type serverMetrics struct { diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 9bec14bb..1c9417d7 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -4,6 +4,7 @@ import ( "math/rand" "github.com/coocood/freecache" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/server" "github.com/envoyproxy/ratelimit/src/settings" diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 6997e5dc..26c3902e 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -7,11 +7,12 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + logger "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/utils" - logger "github.com/sirupsen/logrus" - "golang.org/x/net/context" ) type fixedRateLimitCacheImpl struct { diff --git a/src/server/server_impl.go b/src/server/server_impl.go index c3f3dbb7..bfd66616 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -4,27 +4,24 @@ import ( "bytes" "expvar" "fmt" - "google.golang.org/grpc/keepalive" "io" + "net" "net/http" "net/http/pprof" + "os" + "os/signal" "path/filepath" "sort" "strconv" "sync" - - "github.com/envoyproxy/ratelimit/src/stats" - - "os" - "os/signal" "syscall" - "net" + "google.golang.org/grpc/keepalive" + + "github.com/envoyproxy/ratelimit/src/stats" "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/limiter" - "github.com/envoyproxy/ratelimit/src/settings" "github.com/golang/protobuf/jsonpb" "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" @@ -34,6 +31,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/settings" ) type serverDebugListener struct { @@ -215,7 +215,6 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, 
loc ret.store.ScopeWithTags("runtime", s.ExtraTags), &loader.SymlinkRefresher{RuntimePath: s.RuntimePath}, loaderOpts...) - } else { ret.runtime = loader.New( filepath.Join(s.RuntimePath, s.RuntimeSubdirectory), diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 67004ecf..6b4b2347 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -14,13 +14,14 @@ import ( core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/lyft/goruntime/loader" + logger "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" - "github.com/lyft/goruntime/loader" - logger "github.com/sirupsen/logrus" - "golang.org/x/net/context" ) type RateLimitServiceServer interface { @@ -210,7 +211,6 @@ func (this *service) shouldRateLimitWorker( } func (this *service) rateLimitLimitHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { - // Limit header only provides the mandatory part from the spec, the actual limit // the optional quota policy is currently not provided return &core.HeaderValue{ @@ -220,7 +220,6 @@ func (this *service) rateLimitLimitHeader(descriptor *pb.RateLimitResponse_Descr } func (this *service) rateLimitRemainingHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { - // How much of the limit is remaining return &core.HeaderValue{ Key: this.customHeaderRemainingHeader, diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 54d8d570..8be85288 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -17,6 +17,8 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + logger "github.com/sirupsen/logrus" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/memcached" @@ -25,7 +27,6 @@ import ( ratelimit "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/src/settings" "github.com/envoyproxy/ratelimit/src/utils" - logger "github.com/sirupsen/logrus" ) type Runner struct { diff --git a/src/srv/srv.go b/src/srv/srv.go index 041ceb95..7262d1b4 100644 --- a/src/srv/srv.go +++ b/src/srv/srv.go @@ -23,14 +23,12 @@ func ParseSrv(srv string) (string, string, string, error) { func ServerStringsFromSrv(srv string) ([]string, error) { service, proto, name, err := ParseSrv(srv) - if err != nil { logger.Errorf("failed to parse SRV: %s", err) return nil, err } _, srvs, err := net.LookupSRV(service, proto, name) - if err != nil { logger.Errorf("failed to lookup SRV: %s", err) return nil, err diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index 7748dde7..effad309 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -1,9 +1,10 @@ package stats import ( - "github.com/envoyproxy/ratelimit/src/settings" gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" + + "github.com/envoyproxy/ratelimit/src/settings" ) func NewStatManager(store gostats.Store, settings settings.Settings) *ManagerImpl { diff --git a/test/common/common.go b/test/common/common.go index 54216f1f..0fec1ae1 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -125,7 +125,6 @@ func startCacheProcess(ctx context.Context, command string, args 
[]string, port }() err := cmd.Start() - if err != nil { cancel() return nil, fmt.Errorf("Problem starting %s subprocess: %v", command, err) diff --git a/test/config/config_test.go b/test/config/config_test.go index 3caaf2cc..d22bdcaf 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -9,10 +9,11 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3" - "github.com/envoyproxy/ratelimit/src/config" - mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" + + "github.com/envoyproxy/ratelimit/src/config" + mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" ) func loadFile(path string) []config.RateLimitConfigToLoad { @@ -350,7 +351,6 @@ func TestMisspelledKey(t *testing.T) { config.NewRateLimitConfigImpl( loadFile("misspelled_key2.yaml"), mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) - }, "misspelled_key2.yaml: config error, unknown key 'requestsperunit'") } diff --git a/test/config/duplicate_key.yaml b/test/config/duplicate_key.yaml index daa6a3da..5551c1f2 100644 --- a/test/config/duplicate_key.yaml +++ b/test/config/duplicate_key.yaml @@ -4,4 +4,4 @@ descriptors: value: value1 - key: key1 - value: value1 \ No newline at end of file + value: value1 diff --git a/test/config/misspelled_key.yaml b/test/config/misspelled_key.yaml index 2b2d0a07..2dea25b7 100644 --- a/test/config/misspelled_key.yaml +++ b/test/config/misspelled_key.yaml @@ -4,4 +4,4 @@ descriptors: value: value1 ratelimit: unit: day - requests_per_unit: 5 \ No newline at end of file + requests_per_unit: 5 diff --git a/test/config/misspelled_key2.yaml b/test/config/misspelled_key2.yaml index cbb7f504..f9ba3dce 100644 --- a/test/config/misspelled_key2.yaml +++ b/test/config/misspelled_key2.yaml @@ -4,4 +4,4 @@ descriptors: value: value1 rate_limit: unit: day - requestsperunit: 5 \ No newline at end of file + requestsperunit: 5 diff --git a/test/config/non_map_list.yaml b/test/config/non_map_list.yaml index 5a075903..c5a0da52 100644 --- a/test/config/non_map_list.yaml +++ b/test/config/non_map_list.yaml @@ -2,4 +2,4 @@ domain: test-domain descriptors: - a - b - - c \ No newline at end of file + - c diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 2699b405..2d269eb5 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -12,15 +12,16 @@ import ( "time" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/memcached" - "github.com/envoyproxy/ratelimit/src/service_cmd/runner" - "github.com/envoyproxy/ratelimit/src/settings" - "github.com/envoyproxy/ratelimit/test/common" "github.com/golang/protobuf/ptypes/duration" "github.com/kelseyhightower/envconfig" "github.com/stretchr/testify/assert" "golang.org/x/net/context" "google.golang.org/grpc" + + "github.com/envoyproxy/ratelimit/src/memcached" + "github.com/envoyproxy/ratelimit/src/service_cmd/runner" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/test/common" ) func init() { @@ -52,7 +53,6 @@ func defaultSettings() settings.Settings { } func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32, durRemaining 
*duration.Duration) *pb.RateLimitResponse_DescriptorStatus { - limit := &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit} return &pb.RateLimitResponse_DescriptorStatus{ @@ -387,7 +387,8 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) { assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, - Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}, + }, response) assert.NoError(err) @@ -409,7 +410,9 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) { &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ - newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining)}}, + newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining), + }, + }, response) assert.NoError(err) @@ -451,7 +454,9 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) { &pb.RateLimitResponse{ OverallCode: status, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ - newDescriptorStatus(status, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining, durRemaining)}}, + newDescriptorStatus(status, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining, durRemaining), + }, + }, response) assert.NoError(err) key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache))) @@ -499,7 +504,8 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) { "another", [][][2]string{ {{getCacheKey("key2", enable_local_cache), strconv.Itoa(randomInt)}}, - {{getCacheKey("key3", enable_local_cache), strconv.Itoa(randomInt)}}}, 1)) + {{getCacheKey("key3", enable_local_cache), strconv.Itoa(randomInt)}}, + }, 1)) status := pb.RateLimitResponse_OK limitRemaining1 := uint32(20 - (i + 1)) @@ -516,7 +522,9 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) { OverallCode: status, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ newDescriptorStatus(pb.RateLimitResponse_OK, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining1, durRemaining1), - newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2, durRemaining2)}}, + newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2, durRemaining2), + }, + }, response) assert.NoError(err) @@ -630,7 +638,8 @@ func testConfigReload(s settings.Settings) func(*testing.T) { assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, - Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK}}}, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK}}, + }, response) assert.NoError(err) @@ -688,7 +697,9 @@ func testConfigReload(s settings.Settings) func(*testing.T) { &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ - newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining)}}, + newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining), + }, + }, response) assert.NoError(err) diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index 76e9df65..1a9a4d15 
100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -8,13 +8,14 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/golang/mock/gomock" + stats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/test/common" mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" - "github.com/golang/mock/gomock" - stats "github.com/lyft/gostats" - "github.com/stretchr/testify/assert" ) func TestGenerateCacheKeys(t *testing.T) { @@ -206,7 +207,6 @@ func TestGetResponseStatusBelowLimit(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value()) // No shadow_mode so, no stats change assert.Equal(uint64(0), limits[0].Stats.ShadowMode.Value()) - } func TestGetResponseStatusBelowLimitShadowMode(t *testing.T) { @@ -228,5 +228,4 @@ func TestGetResponseStatusBelowLimitShadowMode(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value()) // No shadow_mode so, no stats change assert.Equal(uint64(0), limits[0].Stats.ShadowMode.Value()) - } diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index f29a6295..a4640148 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -15,18 +15,20 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + stats "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/src/settings" "github.com/envoyproxy/ratelimit/src/utils" - stats "github.com/lyft/gostats" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" "github.com/envoyproxy/ratelimit/test/common" mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached" mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" ) func TestMemcached(t *testing.T) { @@ -71,10 +73,13 @@ func TestMemcached(t *testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false), + } assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) @@ -88,7 +93,8 @@ func TestMemcached(t *testing.T) { }).Return( getMultiResult(map[string]int{ "domain_key3_value3_997200": 10, - "domain_key3_value3_subkey3_subvalue3_950400": 12}), + "domain_key3_value3_subkey3_subvalue3_950400": 12, + }), nil, 
) client.EXPECT().Increment("domain_key3_value3_997200", uint64(1)).Return(uint64(11), nil) @@ -102,11 +108,13 @@ func TestMemcached(t *testing.T) { }, 1) limits = []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false), + } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -229,11 +237,13 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false), + } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -253,7 +263,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -273,7 +284,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -290,7 +302,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { 
client.EXPECT().Increment("domain_key4_value4_997200", uint64(1)).Times(0) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -325,11 +338,13 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false), + } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -345,7 +360,8 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -362,7 +378,8 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) diff --git a/test/memcached/stats_collecting_client_test.go b/test/memcached/stats_collecting_client_test.go index 548b9304..d2e2b5cd 100644 --- a/test/memcached/stats_collecting_client_test.go +++ b/test/memcached/stats_collecting_client_test.go @@ -5,11 +5,12 @@ import ( "testing" "github.com/bradfitz/gomemcache/memcache" - "github.com/envoyproxy/ratelimit/src/memcached" - mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached" "github.com/golang/mock/gomock" stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" + + "github.com/envoyproxy/ratelimit/src/memcached" + mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached" ) type fakeSink struct { diff --git a/test/metrics/metrics_test.go b/test/metrics/metrics_test.go index 
04ffbd68..a3537742 100644 --- a/test/metrics/metrics_test.go +++ b/test/metrics/metrics_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" - "github.com/envoyproxy/ratelimit/src/metrics" stats "github.com/lyft/gostats" statsMock "github.com/lyft/gostats/mock" "github.com/stretchr/testify/assert" "google.golang.org/grpc" + + "github.com/envoyproxy/ratelimit/src/metrics" ) func TestMetricsInterceptor(t *testing.T) { diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go index b34328dc..7ac5220c 100644 --- a/test/mocks/config/config.go +++ b/test/mocks/config/config.go @@ -6,11 +6,13 @@ package mock_config import ( context "context" + reflect "reflect" + envoy_extensions_common_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + gomock "github.com/golang/mock/gomock" + config "github.com/envoyproxy/ratelimit/src/config" stats "github.com/envoyproxy/ratelimit/src/stats" - gomock "github.com/golang/mock/gomock" - reflect "reflect" ) // MockRateLimitConfig is a mock of RateLimitConfig interface diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go index 48f995a1..8e47f042 100644 --- a/test/mocks/limiter/limiter.go +++ b/test/mocks/limiter/limiter.go @@ -6,10 +6,12 @@ package mock_limiter import ( context "context" + reflect "reflect" + envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" - reflect "reflect" + + config "github.com/envoyproxy/ratelimit/src/config" ) // MockRateLimitCache is a mock of RateLimitCache interface diff --git a/test/mocks/memcached/client.go b/test/mocks/memcached/client.go index 433105bd..b00abf1e 100644 --- a/test/mocks/memcached/client.go +++ b/test/mocks/memcached/client.go @@ -5,9 +5,10 @@ package mock_memcached import ( + reflect "reflect" + memcache "github.com/bradfitz/gomemcache/memcache" gomock "github.com/golang/mock/gomock" - reflect "reflect" ) // MockClient is a mock of Client interface diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/redis.go index 032b500d..2d6d059f 100644 --- a/test/mocks/redis/redis.go +++ b/test/mocks/redis/redis.go @@ -5,9 +5,11 @@ package mock_redis import ( - redis "github.com/envoyproxy/ratelimit/src/redis" - gomock "github.com/golang/mock/gomock" reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + redis "github.com/envoyproxy/ratelimit/src/redis" ) // MockClient is a mock of Client interface diff --git a/test/mocks/rls/rls.go b/test/mocks/rls/rls.go index 92d79b9a..57fdb2f7 100644 --- a/test/mocks/rls/rls.go +++ b/test/mocks/rls/rls.go @@ -6,9 +6,10 @@ package mock_v3 import ( context "context" + reflect "reflect" + envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" gomock "github.com/golang/mock/gomock" - reflect "reflect" ) // MockRateLimitServiceServer is a mock of RateLimitServiceServer interface diff --git a/test/mocks/runtime/loader/loader.go b/test/mocks/runtime/loader/loader.go index da00c649..58f238bb 100644 --- a/test/mocks/runtime/loader/loader.go +++ b/test/mocks/runtime/loader/loader.go @@ -5,9 +5,10 @@ package mock_loader import ( + reflect "reflect" + gomock "github.com/golang/mock/gomock" snapshot "github.com/lyft/goruntime/snapshot" - reflect "reflect" ) // MockIFace is a mock of IFace interface diff --git a/test/mocks/runtime/snapshot/snapshot.go b/test/mocks/runtime/snapshot/snapshot.go index a56fe5a5..ae62e0c2 100644 --- 
a/test/mocks/runtime/snapshot/snapshot.go +++ b/test/mocks/runtime/snapshot/snapshot.go @@ -5,10 +5,11 @@ package mock_snapshot import ( - gomock "github.com/golang/mock/gomock" - entry "github.com/lyft/goruntime/snapshot/entry" reflect "reflect" time "time" + + gomock "github.com/golang/mock/gomock" + entry "github.com/lyft/goruntime/snapshot/entry" ) // MockIFace is a mock of IFace interface diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index c0045b14..14850ac6 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -1,9 +1,10 @@ package stats import ( - "github.com/envoyproxy/ratelimit/src/stats" gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" + + "github.com/envoyproxy/ratelimit/src/stats" ) type MockStatManager struct { diff --git a/test/mocks/utils/utils.go b/test/mocks/utils/utils.go index 1812f4f0..44998db1 100644 --- a/test/mocks/utils/utils.go +++ b/test/mocks/utils/utils.go @@ -5,8 +5,9 @@ package mock_utils import ( - gomock "github.com/golang/mock/gomock" reflect "reflect" + + gomock "github.com/golang/mock/gomock" ) // MockTimeSource is a mock of TimeSource interface diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index b5376e63..66f6ccbb 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -2,6 +2,7 @@ package redis_test import ( "context" + "math/rand" "runtime" "testing" "time" @@ -9,12 +10,11 @@ import ( "github.com/envoyproxy/ratelimit/test/mocks/stats" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + gostats "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/utils" - gostats "github.com/lyft/gostats" - - "math/rand" "github.com/envoyproxy/ratelimit/test/common" ) diff --git a/test/redis/driver_impl_test.go b/test/redis/driver_impl_test.go index b4858da1..f549a754 100644 --- a/test/redis/driver_impl_test.go +++ b/test/redis/driver_impl_test.go @@ -5,9 +5,10 @@ import ( "time" "github.com/alicebob/miniredis/v2" - "github.com/envoyproxy/ratelimit/src/redis" stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" + + "github.com/envoyproxy/ratelimit/src/redis" ) func mustNewRedisServer() *miniredis.Miniredis { diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index b13cd946..e28da4b5 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -1,6 +1,7 @@ package redis_test import ( + "math/rand" "testing" "github.com/envoyproxy/ratelimit/test/mocks/stats" @@ -9,19 +10,19 @@ import ( "github.com/mediocregopher/radix/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + gostats "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/utils" - gostats "github.com/lyft/gostats" - "math/rand" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" "github.com/envoyproxy/ratelimit/test/common" mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis" mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" ) func TestRedis(t *testing.T) { @@ -89,10 +90,13 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, 
pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false), + } assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) @@ -117,11 +121,13 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { }, 1) limits = []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false), + } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -194,11 +200,13 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false), + } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -218,7 +226,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, 
timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -238,7 +247,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -256,7 +266,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -289,11 +300,13 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false), + } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -309,7 +322,8 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -326,7 +340,8 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), 
limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -491,11 +506,13 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true)} + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true), + } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -515,7 +532,8 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -536,7 +554,8 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { // The result should be OK since limit is in ShadowMode assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -556,7 +575,8 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { // The result should be OK since limit is in ShadowMode assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + }, cache.DoLimit(nil, request, limits)) // TODO: How should we handle statistics? Should there be a separate ShadowMode statistics? Should the other Stats remain as if they were unaffected by shadowmode? 
assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) diff --git a/test/server/health_test.go b/test/server/health_test.go index a79e3642..d9610507 100644 --- a/test/server/health_test.go +++ b/test/server/health_test.go @@ -8,10 +8,11 @@ import ( "syscall" "testing" - "github.com/envoyproxy/ratelimit/src/server" "google.golang.org/grpc" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/envoyproxy/ratelimit/src/server" ) func TestHealthCheck(t *testing.T) { @@ -42,7 +43,6 @@ func TestHealthCheck(t *testing.T) { if 500 != recorder.Code { t.Errorf("expected code 500 actual %d", recorder.Code) } - } func TestGrpcHealthCheck(t *testing.T) { diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go index 8ee22161..19a8e59f 100644 --- a/test/server/server_impl_test.go +++ b/test/server/server_impl_test.go @@ -2,20 +2,22 @@ package server_test import ( "fmt" - "github.com/golang/protobuf/proto" - "github.com/stretchr/testify/mock" "io/ioutil" "net/http" "net/http/httptest" "strings" "testing" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/mock" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/server" - mock_v3 "github.com/envoyproxy/ratelimit/test/mocks/rls" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + + "github.com/envoyproxy/ratelimit/src/server" + mock_v3 "github.com/envoyproxy/ratelimit/test/mocks/rls" ) func assertHttpResponse(t *testing.T, diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 77e5bf95..ead86ab6 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -12,6 +12,11 @@ import ( core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/golang/mock/gomock" + gostats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" ratelimit "github.com/envoyproxy/ratelimit/src/service" @@ -21,10 +26,6 @@ import ( mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" mock_stats "github.com/envoyproxy/ratelimit/test/mocks/stats" - "github.com/golang/mock/gomock" - gostats "github.com/lyft/gostats" - "github.com/stretchr/testify/assert" - "golang.org/x/net/context" ) type barrier struct { @@ -118,7 +119,8 @@ func TestService(test *testing.T) { t.assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, - Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}, + }, response) t.assert.Nil(err) @@ -135,12 +137,15 @@ func TestService(test *testing.T) { "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), - nil} + nil, + } t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( - 
[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }) response, err = service.ShouldRateLimit(nil, request) common.AssertProtoEqual( t.assert, @@ -149,7 +154,8 @@ func TestService(test *testing.T) { Statuses: []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - }}, + }, + }, response) t.assert.Nil(err) @@ -166,12 +172,15 @@ func TestService(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), + } t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}}) + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }) response, err = service.ShouldRateLimit(nil, request) common.AssertProtoEqual( t.assert, @@ -180,7 +189,8 @@ func TestService(test *testing.T) { Statuses: []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, - }}, + }, + }, response) t.assert.Nil(err) @@ -216,12 +226,15 @@ func TestServiceGlobalShadowMode(test *testing.T) { // Global Shadow mode limits := []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), - nil} + nil, + } t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }) response, err := service.ShouldRateLimit(nil, request) // OK overall code even if limit response was OVER_LIMIT @@ -232,7 +245,8 @@ func TestServiceGlobalShadowMode(test *testing.T) { Statuses: []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: 
pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - }}, + }, + }, response) t.assert.Nil(err) @@ -252,12 +266,15 @@ func TestRuleShadowMode(test *testing.T) { "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), + } t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }) response, err := service.ShouldRateLimit(nil, request) t.assert.Equal( &pb.RateLimitResponse{ @@ -265,7 +282,8 @@ func TestRuleShadowMode(test *testing.T) { Statuses: []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - }}, + }, + }, response) t.assert.Nil(err) @@ -281,7 +299,8 @@ func TestMixedRuleShadowMode(test *testing.T) { "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), + } t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) testResults := []pb.RateLimitResponse_Code{pb.RateLimitResponse_OVER_LIMIT, pb.RateLimitResponse_OVER_LIMIT} @@ -291,8 +310,10 @@ func TestMixedRuleShadowMode(test *testing.T) { } } t.cache.EXPECT().DoLimit(nil, request, limits).Return( - []*pb.RateLimitResponse_DescriptorStatus{{Code: testResults[0], CurrentLimit: limits[0].Limit, LimitRemaining: 0}, - {Code: testResults[1], CurrentLimit: nil, LimitRemaining: 0}}) + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: testResults[0], CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: testResults[1], CurrentLimit: nil, LimitRemaining: 0}, + }) response, err := service.ShouldRateLimit(nil, request) t.assert.Equal( &pb.RateLimitResponse{ @@ -300,7 +321,8 @@ func TestMixedRuleShadowMode(test *testing.T) { Statuses: []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: nil, LimitRemaining: 0}, - }}, + }, + }, response) t.assert.Nil(err) @@ -336,12 +358,15 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) { "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", 
"world"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), - nil} + nil, + } t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }) response, err := service.ShouldRateLimit(nil, request) common.AssertProtoEqual( @@ -385,12 +410,15 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), - nil} + nil, + } t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) t.cache.EXPECT().DoLimit(nil, request, limits).Return( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }) response, err := service.ShouldRateLimit(nil, request) common.AssertProtoEqual( @@ -487,7 +515,8 @@ func TestUnlimited(test *testing.T) { limits := []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false, false), nil, - config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false)} + config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false), + } t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[1]).Return(limits[1]) t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[2]).Return(limits[2]) @@ -510,7 +539,8 @@ func TestUnlimited(test *testing.T) { {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: math.MaxUint32}, - }}, + }, + }, response) t.assert.Nil(err) } diff --git a/test/srv/srv_test.go b/test/srv/srv_test.go index 5e3e8f79..55cb3113 100644 --- a/test/srv/srv_test.go +++ b/test/srv/srv_test.go @@ -5,8 +5,9 @@ import ( "net" "testing" - "github.com/envoyproxy/ratelimit/src/srv" "github.com/stretchr/testify/assert" + + "github.com/envoyproxy/ratelimit/src/srv" ) func TestParseSrv(t *testing.T) { From b163a2d5fe344bf7603a5aae83d0cd5fb9e56088 Mon Sep 17 00:00:00 2001 From: James Fish Date: Mon, 25 Oct 2021 11:16:41 -0700 Subject: [PATCH 027/181] Do not panic on 
debug /rlconfig if no config loaded (#306) * Do not panic on debug /rlconfig if no config loaded Signed-off-by: James Fish * Format Signed-off-by: James Fish * Collapse if Signed-off-by: James Fish --- src/service_cmd/runner/runner.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 8be85288..3d616229 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -117,7 +117,9 @@ func (runner *Runner) Run() { "/rlconfig", "print out the currently loaded configuration for debugging", func(writer http.ResponseWriter, request *http.Request) { - io.WriteString(writer, service.GetCurrentConfig().Dump()) + if current := service.GetCurrentConfig(); current != nil { + io.WriteString(writer, current.Dump()) + } }) srv.AddJsonHandler(service) From 330a681fe3127e14302617ecf42323f1e4690a40 Mon Sep 17 00:00:00 2001 From: debbyku <32248438+debbyku@users.noreply.github.com> Date: Sun, 7 Nov 2021 08:25:58 +0800 Subject: [PATCH 028/181] health check failed if no active redis connection (#310) Signed-off-by: debbyku --- README.md | 7 +++++++ src/redis/cache_impl.go | 4 ++-- src/redis/driver_impl.go | 17 ++++++++++++----- src/server/health.go | 5 +++++ src/server/server.go | 3 +++ src/server/server_impl.go | 8 ++++++++ src/service_cmd/runner/runner.go | 3 ++- src/settings/settings.go | 3 ++- test/redis/bench_test.go | 2 +- test/redis/driver_impl_test.go | 6 +++--- 10 files changed, 45 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 33e367ba..656e615a 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,7 @@ - [Pipelining](#pipelining) - [One Redis Instance](#one-redis-instance) - [Two Redis Instances](#two-redis-instances) + - [Health Checking for Redis Active Connection](#health-checking-for-redis-active-connection) - [Memcache](#memcache) - [Custom headers](#custom-headers) - [Contact](#contact) @@ -711,6 +712,12 @@ To configure two Redis instances use the following environment variables: This setup will use the Redis server configured with the `_PERSECOND_` vars for per second limits, and the other Redis server for all other limits. +## Health Checking for Redis Active Connection + +To configure whether to return a health check failure when there is no active Redis connection: + +1. `REDIS_HEALTH_CHECK_ACTIVE_CONNECTION`: set to `"true"` to fail the health check when there is no active Redis connection (default is `"false"`) + # Memcache Experimental Memcache support has been added as an alternative to Redis in v1.5.
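The hunks that follow wire this flag through the Redis driver so that connection-pool events drive the server's health status. As a minimal, hedged sketch of the underlying pattern (the names `healthGate`, `Ok`, and `Fail` here are illustrative, not taken from the codebase), a health handler can read an atomic flag that pool callbacks flip:

```go
package main

import (
	"net/http"
	"sync/atomic"
)

// healthGate is an illustrative stand-in for the patch's HealthChecker:
// pool callbacks flip an atomic flag, and the HTTP handler reports it.
type healthGate struct{ ok uint32 }

func (h *healthGate) Ok()   { atomic.StoreUint32(&h.ok, 1) }
func (h *healthGate) Fail() { atomic.StoreUint32(&h.ok, 0) }

func (h *healthGate) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
	if atomic.LoadUint32(&h.ok) == 1 {
		w.Write([]byte("OK"))
		return
	}
	w.WriteHeader(http.StatusInternalServerError)
}

func main() {
	gate := &healthGate{}
	gate.Ok() // in the real patch, ConnCreated/ConnClosed pool events drive this
	http.ListenAndServe(":8080", gate)
}
```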
diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 1c9417d7..715e670d 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -16,11 +16,11 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, s.RedisPerSecondSocketType, - s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit, s.RedisTlsConfig) + s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv) } var otherPool Client otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisSocketType, s.RedisType, s.RedisUrl, s.RedisPoolSize, - s.RedisPipelineWindow, s.RedisPipelineLimit, s.RedisTlsConfig) + s.RedisPipelineWindow, s.RedisPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv) return NewFixedRateLimitCacheImpl( otherPool, diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 18a65df1..8044d3dc 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -6,11 +6,12 @@ import ( "strings" "time" - "github.com/mediocregopher/radix/v3/trace" - stats "github.com/lyft/gostats" "github.com/mediocregopher/radix/v3" + "github.com/mediocregopher/radix/v3/trace" logger "github.com/sirupsen/logrus" + + "github.com/envoyproxy/ratelimit/src/server" ) type poolStats struct { @@ -27,15 +28,21 @@ func newPoolStats(scope stats.Scope) poolStats { return ret } -func poolTrace(ps *poolStats) trace.PoolTrace { +func poolTrace(ps *poolStats, healthCheckActiveConnection bool, srv server.Server) trace.PoolTrace { return trace.PoolTrace{ ConnCreated: func(_ trace.PoolConnCreated) { ps.connectionTotal.Add(1) ps.connectionActive.Add(1) + if healthCheckActiveConnection && srv != nil { + srv.HealthCheckOK() + } }, ConnClosed: func(_ trace.PoolConnClosed) { ps.connectionActive.Sub(1) ps.connectionClose.Add(1) + if healthCheckActiveConnection && srv != nil && ps.connectionActive.Value() == 0 { + srv.HealthCheckFail() + } }, } } @@ -53,7 +60,7 @@ func checkError(err error) { } func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisType, url string, poolSize int, - pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config) Client { + pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config, healthCheckActiveConnection bool, srv server.Server) Client { logger.Warnf("connecting to redis on %s with pool size %d", url, poolSize) df := func(network, addr string) (radix.Conn, error) { @@ -78,7 +85,7 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisT stats := newPoolStats(scope) - opts := []radix.PoolOpt{radix.PoolConnFunc(df), radix.PoolWithTrace(poolTrace(&stats))} + opts := []radix.PoolOpt{radix.PoolConnFunc(df), radix.PoolWithTrace(poolTrace(&stats, healthCheckActiveConnection, srv))} implicitPipelining := true if pipelineWindow == 0 && pipelineLimit == 0 { diff --git a/src/server/health.go b/src/server/health.go index d5ba5ac8..d2eb2b76 100644 --- a/src/server/health.go +++ b/src/server/health.go @@ -51,6 +51,11 @@ func (hc *HealthChecker) Fail() { hc.grpc.SetServingStatus(hc.name, healthpb.HealthCheckResponse_NOT_SERVING) } +func (hc *HealthChecker) Ok() { + 
atomic.StoreUint32(&hc.ok, 1) + hc.grpc.SetServingStatus(hc.name, healthpb.HealthCheckResponse_SERVING) +} + func (hc *HealthChecker) Server() *health.Server { return hc.grpc } diff --git a/src/server/server.go b/src/server/server.go index 7f2b1b05..46c8ea5d 100644 --- a/src/server/server.go +++ b/src/server/server.go @@ -43,4 +43,7 @@ type Server interface { * Stops serving the grpc port (for integration testing). */ Stop() + + HealthCheckFail() + HealthCheckOK() } diff --git a/src/server/server_impl.go b/src/server/server_impl.go index bfd66616..bab235ec 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -311,3 +311,11 @@ func (server *server) handleGracefulShutdown() { os.Exit(0) }() } + +func (server *server) HealthCheckFail() { + server.health.Fail() +} + +func (server *server) HealthCheckOK() { + server.health.Ok() +} diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 3d616229..e6f3ccfd 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -57,7 +57,8 @@ func createLimiter(srv server.Server, s settings.Settings, localCache *freecache utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), s.ExpirationJitterMaxSeconds, - statsManager) + statsManager, + ) case "memcache": return memcached.NewRateLimitCacheImplFromSettings( s, diff --git a/src/settings/settings.go b/src/settings/settings.go index c9afb210..5190dc20 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -88,7 +88,8 @@ type Settings struct { // RedisPerSecondPipelineLimit sets maximum number of commands that can be pipelined before flushing for per second redis. // See comments of RedisPipelineLimit for details. RedisPerSecondPipelineLimit int `envconfig:"REDIS_PERSECOND_PIPELINE_LIMIT" default:"0"` - + // Enable the health check to verify an active Redis connection. If there is no active connection, the health check fails. + RedisHealthCheckActiveConnection bool `envconfig:"REDIS_HEALTH_CHECK_ACTIVE_CONNECTION" default:"false"` // Memcache settings MemcacheHostPort []string `envconfig:"MEMCACHE_HOST_PORT" default:""` // MemcacheMaxIdleConns sets the maximum number of idle TCP connections per memcached node.
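The new setting rides on the `envconfig` struct tags used throughout `settings.go`. Below is a self-contained sketch of how such a field is populated; `demoSettings` and its `main` function are illustrative, and only the field and its tag mirror the patch:

```go
package main

import (
	"fmt"

	"github.com/kelseyhightower/envconfig"
)

// demoSettings is a hypothetical struct; only the field and its
// envconfig tag mirror the patch above.
type demoSettings struct {
	RedisHealthCheckActiveConnection bool `envconfig:"REDIS_HEALTH_CHECK_ACTIVE_CONNECTION" default:"false"`
}

func main() {
	var s demoSettings
	// Process reads REDIS_HEALTH_CHECK_ACTIVE_CONNECTION from the environment,
	// falling back to the default tag when the variable is unset.
	if err := envconfig.Process("", &s); err != nil {
		panic(err)
	}
	fmt.Println("fail health check on no active Redis connection:", s.RedisHealthCheckActiveConnection)
}
```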
diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 66f6ccbb..27eaf6d6 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -44,7 +44,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { return func(b *testing.B) { statsStore := gostats.NewStore(gostats.NewNullSink(), false) sm := stats.NewMockStatManager(statsStore) - client := redis.NewClientImpl(statsStore, false, "", "tcp", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit, nil) + client := redis.NewClientImpl(statsStore, false, "", "tcp", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit, nil, false, nil) defer client.Close() cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm) diff --git a/test/redis/driver_impl_test.go b/test/redis/driver_impl_test.go index f549a754..3f924e4a 100644 --- a/test/redis/driver_impl_test.go +++ b/test/redis/driver_impl_test.go @@ -37,7 +37,7 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(auth, addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, auth, "tcp", "single", addr, 1, pipelineWindow, pipelineLimit, nil) + return redis.NewClientImpl(statsStore, false, auth, "tcp", "single", addr, 1, pipelineWindow, pipelineLimit, nil, false, nil) } t.Run("connection refused", func(t *testing.T) { @@ -104,7 +104,7 @@ func TestDoCmd(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, 0, 0, nil) + return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, 0, 0, nil, false, nil) } t.Run("SETGET ok", func(t *testing.T) { @@ -149,7 +149,7 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, pipelineWindow, pipelineLimit, nil) + return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, pipelineWindow, pipelineLimit, nil, false, nil) } t.Run("SETGET ok", func(t *testing.T) { From f2c9a1675cc1938d76e2dd665dd83826a1495cbd Mon Sep 17 00:00:00 2001 From: petedmarsh Date: Tue, 9 Nov 2021 17:25:27 +0100 Subject: [PATCH 029/181] Tests for memcache hosts via SRV (#298) Signed-off-by: Peter Marsh --- src/memcached/cache_impl.go | 16 +++--- src/memcached/cache_impl_test.go | 94 ++++++++++++++++++++++++++++++++ src/srv/srv.go | 8 ++- test/mocks/mocks.go | 1 + test/mocks/srv/srv.go | 49 +++++++++++++++++ test/srv/srv_test.go | 9 ++- 6 files changed, 165 insertions(+), 12 deletions(-) create mode 100644 src/memcached/cache_impl_test.go create mode 100644 test/mocks/srv/srv.go diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 5ddbe664..9d00bec7 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -177,13 +177,13 @@ func (this *rateLimitMemcacheImpl) Flush() { this.waitGroup.Wait() } -func refreshServersPeriodically(serverList *memcache.ServerList, srv string, d time.Duration, finish <-chan struct{}) { +func refreshServersPeriodically(serverList *memcache.ServerList, srv string, d time.Duration, resolver srv.SrvResolver, finish <-chan struct{}) { t := time.NewTicker(d) defer t.Stop() 
for { select { case <-t.C: - err := refreshServers(serverList, srv) + err := refreshServers(serverList, srv, resolver) if err != nil { logger.Warn("failed to refresh memcache hosts") } else { @@ -195,8 +195,8 @@ func refreshServersPeriodically(serverList *memcache.ServerList, srv string, d t } } -func refreshServers(serverList *memcache.ServerList, srv_ string) error { - servers, err := srv.ServerStringsFromSrv(srv_) +func refreshServers(serverList *memcache.ServerList, srv string, resolver srv.SrvResolver) error { + servers, err := resolver.ServerStringsFromSrv(srv) if err != nil { return err } @@ -207,9 +207,9 @@ func refreshServers(serverList *memcache.ServerList, srv_ string) error { return nil } -func newMemcachedFromSrv(srv_ string, d time.Duration) Client { +func newMemcachedFromSrv(srv string, d time.Duration, resolver srv.SrvResolver) Client { serverList := new(memcache.ServerList) - err := refreshServers(serverList, srv_) + err := refreshServers(serverList, srv, resolver) if err != nil { errorText := "Unable to fetch servers from SRV" logger.Errorf(errorText) @@ -219,7 +219,7 @@ if d > 0 { logger.Infof("refreshing memcache hosts every: %v milliseconds", d.Milliseconds()) finish := make(chan struct{}) - go refreshServersPeriodically(serverList, srv_, d, finish) + go refreshServersPeriodically(serverList, srv, d, resolver, finish) } else { logger.Debugf("not periodically refreshing memcached hosts") } @@ -233,7 +233,7 @@ func newMemcacheFromSettings(s settings.Settings) Client { } if s.MemcacheSrv != "" { logger.Debugf("Using MEMCACHE_SRV: %v", s.MemcacheSrv) - return newMemcachedFromSrv(s.MemcacheSrv, s.MemcacheSrvRefresh) + return newMemcachedFromSrv(s.MemcacheSrv, s.MemcacheSrvRefresh, new(srv.DnsSrvResolver)) } logger.Debugf("Using MEMCACHE_HOST_PORT: %v", s.MemcacheHostPort) client := memcache.New(s.MemcacheHostPort...)
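The point of threading a `srv.SrvResolver` through these functions is testability: the new tests below substitute a mock resolver for real DNS. Here is a minimal sketch of that seam, assuming hypothetical `fakeResolver` and `hostsFor` helpers; only the interface shape matches the patch:

```go
package main

import "fmt"

// SrvResolver matches the interface introduced by the patch; fakeResolver
// is a hypothetical test double standing in for real DNS lookups.
type SrvResolver interface {
	ServerStringsFromSrv(srv string) ([]string, error)
}

type fakeResolver struct{ servers []string }

func (f fakeResolver) ServerStringsFromSrv(string) ([]string, error) {
	return f.servers, nil
}

// hostsFor depends only on the interface, so production code can pass a
// DNS-backed resolver while tests pass a fake.
func hostsFor(srvName string, r SrvResolver) ([]string, error) {
	return r.ServerStringsFromSrv(srvName)
}

func main() {
	hosts, _ := hostsFor("_memcache._tcp.example.org", fakeResolver{servers: []string{"127.0.0.1:11211"}})
	fmt.Println(hosts)
}
```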
diff --git a/src/memcached/cache_impl_test.go b/src/memcached/cache_impl_test.go new file mode 100644 index 00000000..226f95ca --- /dev/null +++ b/src/memcached/cache_impl_test.go @@ -0,0 +1,94 @@ +package memcached + +import ( + "errors" + "net" + "testing" + + "github.com/bradfitz/gomemcache/memcache" + "github.com/stretchr/testify/assert" + + "github.com/golang/mock/gomock" + + mock_srv "github.com/envoyproxy/ratelimit/test/mocks/srv" +) + +func TestRefreshServersSetsServersOnEmptyServerList(t *testing.T) { + assert := assert.New(t) + + mockSrv := "_memcache._tcp.example.org" + mockMemcacheHostPort := "127.0.0.1:11211" + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockSrvResolver := mock_srv.NewMockSrvResolver(ctrl) + + mockSrvResolver.EXPECT().ServerStringsFromSrv(gomock.Eq(mockSrv)).Return([]string{mockMemcacheHostPort}, nil) + + serverList := new(memcache.ServerList) + + refreshServers(serverList, mockSrv, mockSrvResolver) + + actualMemcacheHosts := []string{} + + serverList.Each(func(addr net.Addr) error { + actualMemcacheHosts = append(actualMemcacheHosts, addr.String()) + return nil + }) + + assert.Equal([]string{mockMemcacheHostPort}, actualMemcacheHosts) +} + +func TestRefreshServersOverridesServersOnNonEmptyServerList(t *testing.T) { + assert := assert.New(t) + + mockSrv := "_memcache._tcp.example.org" + mockMemcacheHostPort := "127.0.0.1:11211" + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockSrvResolver := mock_srv.NewMockSrvResolver(ctrl) + + mockSrvResolver.EXPECT().ServerStringsFromSrv(gomock.Eq(mockSrv)).Return([]string{mockMemcacheHostPort}, nil) + + serverList := new(memcache.ServerList) + serverList.SetServers("127.0.0.2:11211", "127.0.0.3:11211") + + refreshServers(serverList, mockSrv, mockSrvResolver) + + actualMemcacheHosts := []string{} + + serverList.Each(func(addr net.Addr) error { + actualMemcacheHosts = append(actualMemcacheHosts, addr.String()) + return nil + }) + + assert.Equal([]string{mockMemcacheHostPort}, actualMemcacheHosts) +} + +func TestRefreshServerSetsServersDoesNotChangeAnythingIfThereIsAnError(t *testing.T) { + assert := assert.New(t) + + mockSrv := "_memcache._tcp.example.org" + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockSrvResolver := mock_srv.NewMockSrvResolver(ctrl) + + mockSrvResolver.EXPECT().ServerStringsFromSrv(gomock.Eq(mockSrv)).Return(nil, errors.New("some error")) + + originalServers := []string{"127.0.0.2:11211", "127.0.0.3:11211"} + serverList := new(memcache.ServerList) + serverList.SetServers(originalServers...) 
+ + refreshServers(serverList, mockSrv, mockSrvResolver) + + actualMemcacheHosts := []string{} + + serverList.Each(func(addr net.Addr) error { + actualMemcacheHosts = append(actualMemcacheHosts, addr.String()) + return nil + }) + + assert.Equal(originalServers, actualMemcacheHosts) +} diff --git a/src/srv/srv.go b/src/srv/srv.go index 7262d1b4..9a9aff51 100644 --- a/src/srv/srv.go +++ b/src/srv/srv.go @@ -11,6 +11,12 @@ import ( var srvRegex = regexp.MustCompile(`^_(.+?)\._(.+?)\.(.+)$`) +type SrvResolver interface { + ServerStringsFromSrv(srv string) ([]string, error) +} + +type DnsSrvResolver struct{} + func ParseSrv(srv string) (string, string, string, error) { matches := srvRegex.FindStringSubmatch(srv) if matches == nil { @@ -21,7 +27,7 @@ func ParseSrv(srv string) (string, string, string, error) { return matches[1], matches[2], matches[3], nil } -func ServerStringsFromSrv(srv string) ([]string, error) { +func (dnsSrvResolver DnsSrvResolver) ServerStringsFromSrv(srv string) ([]string, error) { service, proto, name, err := ParseSrv(srv) if err != nil { logger.Errorf("failed to parse SRV: %s", err) diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go index 2aafcb30..6370e2a1 100644 --- a/test/mocks/mocks.go +++ b/test/mocks/mocks.go @@ -8,3 +8,4 @@ package mocks //go:generate go run github.com/golang/mock/mockgen -destination ./utils/utils.go github.com/envoyproxy/ratelimit/src/utils TimeSource,JitterRandSource //go:generate go run github.com/golang/mock/mockgen -destination ./memcached/client.go github.com/envoyproxy/ratelimit/src/memcached Client //go:generate go run github.com/golang/mock/mockgen -destination ./rls/rls.go github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3 RateLimitServiceServer +//go:generate go run github.com/golang/mock/mockgen -destination ./srv/srv.go github.com/envoyproxy/ratelimit/src/srv SrvResolver diff --git a/test/mocks/srv/srv.go b/test/mocks/srv/srv.go new file mode 100644 index 00000000..1c7ef614 --- /dev/null +++ b/test/mocks/srv/srv.go @@ -0,0 +1,49 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/envoyproxy/ratelimit/src/srv (interfaces: SrvResolver) + +// Package mock_srv is a generated GoMock package. 
+package mock_srv + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockSrvResolver is a mock of SrvResolver interface +type MockSrvResolver struct { + ctrl *gomock.Controller + recorder *MockSrvResolverMockRecorder +} + +// MockSrvResolverMockRecorder is the mock recorder for MockSrvResolver +type MockSrvResolverMockRecorder struct { + mock *MockSrvResolver +} + +// NewMockSrvResolver creates a new mock instance +func NewMockSrvResolver(ctrl *gomock.Controller) *MockSrvResolver { + mock := &MockSrvResolver{ctrl: ctrl} + mock.recorder = &MockSrvResolverMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockSrvResolver) EXPECT() *MockSrvResolverMockRecorder { + return m.recorder +} + +// ServerStringsFromSrv mocks base method +func (m *MockSrvResolver) ServerStringsFromSrv(arg0 string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ServerStringsFromSrv", arg0) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ServerStringsFromSrv indicates an expected call of ServerStringsFromSrv +func (mr *MockSrvResolverMockRecorder) ServerStringsFromSrv(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServerStringsFromSrv", reflect.TypeOf((*MockSrvResolver)(nil).ServerStringsFromSrv), arg0) +} diff --git a/test/srv/srv_test.go b/test/srv/srv_test.go index 55cb3113..e5e273bf 100644 --- a/test/srv/srv_test.go +++ b/test/srv/srv_test.go @@ -28,12 +28,14 @@ func TestParseSrv(t *testing.T) { } func TestServerStringsFromSrvWhenSrvIsNotWellFormed(t *testing.T) { - _, err := srv.ServerStringsFromSrv("example.org") + srvResolver := srv.DnsSrvResolver{} + _, err := srvResolver.ServerStringsFromSrv("example.org") assert.Equal(t, err, errors.New("could not parse example.org to SRV parts")) } func TestServerStringsFromSevWhenSrvIsWellFormedButNotLookupable(t *testing.T) { - _, err := srv.ServerStringsFromSrv("_something._tcp.example.invalid") + srvResolver := srv.DnsSrvResolver{} + _, err := srvResolver.ServerStringsFromSrv("_something._tcp.example.invalid") var e *net.DNSError if errors.As(err, &e) { assert.Equal(t, e.Err, "no such host") @@ -48,7 +50,8 @@ func TestServerStringsFromSevWhenSrvIsWellFormedButNotLookupable(t *testing.T) { func TestServerStrings(t *testing.T) { // it seems reasonable to think _xmpp-server._tcp.gmail.com will be available for a long time! 
- servers, err := srv.ServerStringsFromSrv("_xmpp-server._tcp.gmail.com.") + srvResolver := srv.DnsSrvResolver{} + servers, err := srvResolver.ServerStringsFromSrv("_xmpp-server._tcp.gmail.com.") assert.True(t, len(servers) > 0) for _, s := range servers { assert.Regexp(t, `^.*xmpp-server.*google.com.:\d+$`, s) From 0a85f5163ee79e3e89d001752ddee0755c463e84 Mon Sep 17 00:00:00 2001 From: debbyku <32248438+debbyku@users.noreply.github.com> Date: Sat, 27 Nov 2021 05:32:28 +0800 Subject: [PATCH 030/181] Incorrectly count redis active connection (#312) * health check failed if no active redis connection Signed-off-by: debbyku * solve redis connection active count error Signed-off-by: debbyku --- src/redis/driver_impl.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 8044d3dc..6014774f 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -30,11 +30,15 @@ func newPoolStats(scope stats.Scope) poolStats { func poolTrace(ps *poolStats, healthCheckActiveConnection bool, srv server.Server) trace.PoolTrace { return trace.PoolTrace{ - ConnCreated: func(_ trace.PoolConnCreated) { - ps.connectionTotal.Add(1) - ps.connectionActive.Add(1) - if healthCheckActiveConnection && srv != nil { - srv.HealthCheckOK() + ConnCreated: func(newConn trace.PoolConnCreated) { + if newConn.Err == nil { + ps.connectionTotal.Add(1) + ps.connectionActive.Add(1) + if healthCheckActiveConnection && srv != nil { + srv.HealthCheckOK() + } + } else { + fmt.Println("creating redis connection error :", newConn.Err) } }, ConnClosed: func(_ trace.PoolConnClosed) { From 4d2efd61904a55599ead35830ba6cc42deceb099 Mon Sep 17 00:00:00 2001 From: petedmarsh Date: Wed, 22 Dec 2021 22:29:08 +0000 Subject: [PATCH 031/181] Fix memcached shards being inconsistent when looking up hosts by SRV (#316) SRV records return hosts in an arbitrary order, but the memcached library in use relies on the order of the hosts for sharding (i.e. the i-th host passed to the client will host shard i). If multiple ratelimit instances looked up the same set of memcached hosts via SRV they could receive them in different orders and begin sharding descriptor keys to separate hosts, leading to non-global rate limiting (as the sum of hits would be split across hosts, meaning each ratelimit instance would have an incomplete total of hits). This fixes the issue by sorting the hosts returned by the SRV lexicographically. Both weight and priority are ignored; weight was not previously handled and priority is not possible to handle (as the memcached library cannot take a priority into account when determining shards).
Signed-off-by: Peter Marsh --- src/srv/srv.go | 13 ++++++++++++- src/srv/srv_test.go | 18 ++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 src/srv/srv_test.go diff --git a/src/srv/srv.go b/src/srv/srv.go index 9a9aff51..e591c147 100644 --- a/src/srv/srv.go +++ b/src/srv/srv.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "regexp" + "sort" logger "github.com/sirupsen/logrus" ) @@ -17,6 +18,8 @@ type SrvResolver interface { type DnsSrvResolver struct{} +type addrsLookup func(service, proto, name string) (cname string, addrs []*net.SRV, err error) + func ParseSrv(srv string) (string, string, string, error) { matches := srvRegex.FindStringSubmatch(srv) if matches == nil { @@ -28,13 +31,17 @@ func ParseSrv(srv string) (string, string, string, error) { } func (dnsSrvResolver DnsSrvResolver) ServerStringsFromSrv(srv string) ([]string, error) { + return lookupServerStringsFromSrv(srv, net.LookupSRV) +} + +func lookupServerStringsFromSrv(srv string, addrsLookup addrsLookup) ([]string, error) { service, proto, name, err := ParseSrv(srv) if err != nil { logger.Errorf("failed to parse SRV: %s", err) return nil, err } - _, srvs, err := net.LookupSRV(service, proto, name) + _, srvs, err := addrsLookup(service, proto, name) if err != nil { logger.Errorf("failed to lookup SRV: %s", err) return nil, err @@ -49,5 +56,9 @@ func (dnsSrvResolver DnsSrvResolver) ServerStringsFromSrv(srv string) ([]string, serversFromSrv[i] = fmt.Sprintf("%s:%v", srv.Target, srv.Port) } + // we sort the server strings (host:port) to make sure we get a consistent order, as + // bradfitz/gomemcache assigns shards based on the order of the given hosts + sort.Strings(serversFromSrv) + return serversFromSrv, nil } diff --git a/src/srv/srv_test.go b/src/srv/srv_test.go new file mode 100644 index 00000000..64a36682 --- /dev/null +++ b/src/srv/srv_test.go @@ -0,0 +1,18 @@ +package srv + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func mockAddrsLookup(service, proto, name string) (cname string, addrs []*net.SRV, err error) { + return "ignored", []*net.SRV{{"z", 1, 0, 0}, {"z", 0, 0, 0}, {"a", 9001, 0, 0}}, nil +} + +func TestLookupServerStringsFromSrvReturnsServersSorted(t *testing.T) { + targets, err := lookupServerStringsFromSrv("_something._tcp.example.org.", mockAddrsLookup) + assert.Nil(t, err) + assert.Equal(t, targets, []string{"a:9001", "z:0", "z:1"}) +} From 49fbf58e4e78a9cb99e248cc55e7b705a8a23bf5 Mon Sep 17 00:00:00 2001 From: Vito Sabella Date: Tue, 25 Jan 2022 01:33:06 +0700 Subject: [PATCH 032/181] TLS config pointer is never nil which regresses TLS Support (#318) * TLS config pointer is never nil, fixes regression in pull #289 fixes issue #303 Signed-off-by: Vito Sabella * Accidentally put the initialization before the environment variable reading.
Signed-off-by: Vito Sabella * Unit test for settings tlsConfig fix Signed-off-by: Vito Sabella * Fixing pre-commits Signed-off-by: Vito Sabella Co-authored-by: Vito Sabella --- src/redis/driver_impl.go | 6 +----- src/settings/settings.go | 7 ++++++- src/settings/settings_test.go | 13 +++++++++++++ test/integration/integration_test.go | 4 +++- 4 files changed, 23 insertions(+), 7 deletions(-) create mode 100644 src/settings/settings_test.go diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 6014774f..280a7cbb 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -71,11 +71,7 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisT var dialOpts []radix.DialOpt if useTls { - if tlsConfig != nil { - dialOpts = append(dialOpts, radix.DialUseTLS(tlsConfig)) - } else { - dialOpts = append(dialOpts, radix.DialUseTLS(&tls.Config{})) - } + dialOpts = append(dialOpts, radix.DialUseTLS(tlsConfig)) } if auth != "" { diff --git a/src/settings/settings.go b/src/settings/settings.go index 5190dc20..fa724330 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -109,8 +109,13 @@ type Option func(*Settings) func NewSettings() Settings { var s Settings - err := envconfig.Process("", &s) + + // Golang copy-by-value causes the RootCAs to no longer be nil + // which isn't the expected default behavior of continuing to use system roots + // so let's just initialize to what we want the correct value to be. + s.RedisTlsConfig = &tls.Config{} + if err != nil { panic(err) } diff --git a/src/settings/settings_test.go b/src/settings/settings_test.go new file mode 100644 index 00000000..0a391b78 --- /dev/null +++ b/src/settings/settings_test.go @@ -0,0 +1,13 @@ +package settings + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSettingsTlsConfigUnmodified(t *testing.T) { + settings := NewSettings() + assert.NotNil(t, settings.RedisTlsConfig) + assert.Nil(t, settings.RedisTlsConfig.RootCAs) +} diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 2d269eb5..66b6d1b6 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1,8 +1,10 @@ +//go:build integration // +build integration package integration_test import ( + "crypto/tls" "fmt" "io" "math/rand" @@ -223,7 +225,7 @@ func TestMultiNodeMemcache(t *testing.T) { func testBasicConfigAuthTLS(perSecond bool, local_cache_size int) func(*testing.T) { s := makeSimpleRedisSettings(16381, 16382, perSecond, local_cache_size) - s.RedisTlsConfig = nil + s.RedisTlsConfig = &tls.Config{} s.RedisAuth = "password123" s.RedisTls = true s.RedisPerSecondAuth = "password123" From 8d6488ead8618ce49a492858321dae946f2d97bc Mon Sep 17 00:00:00 2001 From: Vito Sabella Date: Tue, 25 Jan 2022 01:55:53 +0700 Subject: [PATCH 033/181] Upgrade to Golang v1.17 and Alpine 3.15 (#320) * Upgrade to Golang v1.17 and Alpine 3.15 Signed-off-by: Vito Sabella * Run pre-commit -a Signed-off-by: Vito Sabella Co-authored-by: Vito Sabella --- Dockerfile | 4 ++-- Dockerfile.integration | 2 +- Makefile | 1 + go.mod | 24 ++++++++++++++++++------ 4 files changed, 22 insertions(+), 9 deletions(-) diff --git a/Dockerfile b/Dockerfile index b0bdb0bd..8d9990a2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.14 AS build +FROM golang:1.17 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w 
-s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.11 AS final +FROM alpine:3.15 AS final RUN apk --no-cache add ca-certificates COPY --from=build /go/bin/ratelimit /bin/ratelimit diff --git a/Dockerfile.integration b/Dockerfile.integration index 55eb04b4..86120108 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang:1.14 +FROM golang:1.17 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* diff --git a/Makefile b/Makefile index 55fd1536..d949aba7 100644 --- a/Makefile +++ b/Makefile @@ -36,6 +36,7 @@ redis-per-second.conf: bootstrap_redis_tls: redis.conf redis-per-second.conf openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \ -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=localhost" \ + -addext "subjectAltName = DNS:localhost" \ -keyout key.pem -out cert.pem cat key.pem cert.pem > private.pem sudo cp cert.pem /usr/local/share/ca-certificates/redis-stunnel.crt diff --git a/go.mod b/go.mod index 1c282fcd..0184550b 100644 --- a/go.mod +++ b/go.mod @@ -1,14 +1,12 @@ module github.com/envoyproxy/ratelimit -go 1.14 +go 1.17 require ( github.com/alicebob/miniredis/v2 v2.11.4 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b - github.com/cespare/xxhash v1.1.0 // indirect github.com/coocood/freecache v1.1.0 github.com/envoyproxy/go-control-plane v0.9.7 - github.com/fsnotify/fsnotify v1.4.7 // indirect github.com/golang/mock v1.4.1 github.com/golang/protobuf v1.4.2 github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 @@ -18,12 +16,26 @@ require ( github.com/lyft/gostats v0.4.0 github.com/mediocregopher/radix/v3 v3.5.1 github.com/sirupsen/logrus v1.6.0 - github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.5.1 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 + google.golang.org/grpc v1.27.0 + gopkg.in/yaml.v2 v2.3.0 +) + +require ( + github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.2.0 // indirect + github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e // indirect golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb // indirect - google.golang.org/grpc v1.27.0 + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect + google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect google.golang.org/protobuf v1.25.0 // indirect - gopkg.in/yaml.v2 v2.3.0 ) From d9b92a78fef6c20936911288af23a143f80ab110 Mon Sep 17 00:00:00 2001 From: Vito Sabella Date: Thu, 31 Mar 2022 21:57:49 +0700 Subject: [PATCH 034/181] Update golang x/net to latest version to repair CVEs (#324) Signed-off-by: Vito Sabella --- Dockerfile | 2 +- go.mod | 6 +++--- go.sum | 14 ++++++++------ 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8d9990a2..02d8ed30 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,5 +11,5 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v 
github.com/envoyproxy/ratelimit/src/service_cmd FROM alpine:3.15 AS final -RUN apk --no-cache add ca-certificates +RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit diff --git a/go.mod b/go.mod index 0184550b..6284a702 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/mediocregopher/radix/v3 v3.5.1 github.com/sirupsen/logrus v1.6.0 github.com/stretchr/testify v1.5.1 - golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 + golang.org/x/net v0.0.0-20220325170049-de3da57026de google.golang.org/grpc v1.27.0 gopkg.in/yaml.v2 v2.3.0 ) @@ -33,8 +33,8 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.2.0 // indirect github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect - golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e // indirect - golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb // indirect + golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect + golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect google.golang.org/protobuf v1.25.0 // indirect diff --git a/go.sum b/go.sum index ff594f00..7f4ad15e 100644 --- a/go.sum +++ b/go.sum @@ -92,8 +92,8 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -102,12 +102,14 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb h1:MsKWO3hK1h941VWsQ8dKJqIdb3r3XP9/cDw8n/B95SM= -golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= From dbfc019c1013f725bdc3e81441a04c29cc42d585 Mon Sep 17 00:00:00 2001 From: "Michael S. Fischer" Date: Wed, 6 Apr 2022 22:00:36 -0700 Subject: [PATCH 035/181] Produce multi-arch Docker image for x86/arm64 (#325) Signed-off-by: Michael Fischer --- .github/workflows/main.yaml | 11 +++++++++-- .github/workflows/release.yaml | 9 ++++++++- Makefile | 9 +++++++++ 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index ae305019..48bf0536 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -18,11 +18,18 @@ jobs: steps: - uses: actions/checkout@v2 + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + + - name: Set up Docker buildx + id: buildx + uses: docker/setup-buildx-action@v1 + - name: build and push docker image run: | echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - VERSION=master make docker_push # Push image tagged with "master" - make docker_push # Push image tagged with git sha + VERSION=master make docker_multiarch_push # Push image tagged with "master" + make docker_multiarch_push # Push image tagged with git sha env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index bcfc82b1..7b231131 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -17,10 +17,17 @@ jobs: steps: - uses: actions/checkout@v2 + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + + - name: Set up Docker buildx + id: buildx + uses: docker/setup-buildx-action@v1 + - name: build and push docker image run: | echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - make docker_push + make docker_multiarch_push env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} diff --git a/Makefile b/Makefile index d949aba7..f72ea8af 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,7 @@ MODULE = github.com/envoyproxy/ratelimit GIT_REF = $(shell git describe --tags --exact-match 2>/dev/null || git rev-parse --short=8 --verify HEAD) VERSION ?= $(GIT_REF) SHELL := /bin/bash +BUILDX_PLATFORMS := linux/amd64,linux/arm64/v8 .PHONY: bootstrap bootstrap: ; @@ -115,6 +116,14 @@ docker_image: docker_tests docker_push: docker_image docker push $(IMAGE):$(VERSION) +.PHONY: docker_multiarch_image +docker_multiarch_image: docker_tests + docker buildx build -t $(IMAGE):$(VERSION) --platform $(BUILDX_PLATFORMS) . 
+ +.PHONY: docker_multiarch_push +docker_multiarch_push: docker_multiarch_image + docker buildx build -t $(IMAGE):$(VERSION) --platform $(BUILDX_PLATFORMS) --push . + .PHONY: integration_tests integration_tests: docker-compose --project-dir $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester From 5b6e65da8a9b4fa500c3d3fdc2f8a4dacd857adc Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Mon, 18 Apr 2022 21:45:37 +0700 Subject: [PATCH 036/181] Allow to set key-pair for the Redis TLS connection (#328) This allows setting key-pair for connecting to Redis server which enables client certificate verification. Signed-off-by: Dhi Aurrahman --- .gitignore | 5 +++ Makefile | 18 ++++++++++- README.md | 1 + src/settings/settings.go | 48 ++++++++++++++++++++++++++-- test/integration/integration_test.go | 20 ++++++++++++ 5 files changed, 88 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 826a29f4..47bc3e84 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,8 @@ key.pem private.pem redis-per-second.conf redis.conf +redis-verify-peer.conf + +# Directories created by "test_with_redis" make target. +63* +26* diff --git a/Makefile b/Makefile index f72ea8af..857788cd 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,9 @@ GIT_REF = $(shell git describe --tags --exact-match 2>/dev/null || git rev-parse VERSION ?= $(GIT_REF) SHELL := /bin/bash BUILDX_PLATFORMS := linux/amd64,linux/arm64/v8 +# Root dir returns absolute path of current directory. It has a trailing "/". +PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +export PROJECT_DIR .PHONY: bootstrap bootstrap: ; @@ -26,15 +29,26 @@ pid = /var/run/stunnel-2.pid accept = 127.0.0.1:16382 connect = 127.0.0.1:6382 endef +define REDIS_VERIFY_PEER_STUNNEL +cert = private.pem +pid = /var/run/stunnel-3.pid +[redis] +CAfile = cert.pem +accept = 127.0.0.1:16361 +connect = 127.0.0.1:6361 +endef export REDIS_STUNNEL export REDIS_PER_SECOND_STUNNEL +export REDIS_VERIFY_PEER_STUNNEL redis.conf: echo "$$REDIS_STUNNEL" >> $@ redis-per-second.conf: echo "$$REDIS_PER_SECOND_STUNNEL" >> $@ +redis-verify-peer.conf: + echo "$$REDIS_VERIFY_PEER_STUNNEL" >> $@ .PHONY: bootstrap_redis_tls -bootstrap_redis_tls: redis.conf redis-per-second.conf +bootstrap_redis_tls: redis.conf redis-per-second.conf redis-verify-peer.conf openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \ -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=localhost" \ -addext "subjectAltName = DNS:localhost" \ @@ -45,6 +59,7 @@ bootstrap_redis_tls: redis.conf redis-per-second.conf sudo update-ca-certificates sudo stunnel redis.conf sudo stunnel redis-per-second.conf + sudo stunnel redis-verify-peer.conf .PHONY: docs_format docs_format: script/docs_check_format @@ -77,6 +92,7 @@ tests: compile tests_with_redis: bootstrap_redis_tls tests_unit redis-server --port 6381 --requirepass password123 & redis-server --port 6382 --requirepass password123 & + redis-server --port 6361 --requirepass password123 & redis-server --port 6392 --requirepass password123 & redis-server --port 6393 --requirepass password123 --slaveof 127.0.0.1 6392 --masterauth password123 & diff --git a/README.md b/README.md index 656e615a..3416ceac 100644 --- a/README.md +++ b/README.md @@ -655,6 +655,7 @@ Ratelimit uses Redis as its caching layer. Ratelimit supports two operation mode As well Ratelimit supports TLS connections and authentication. These can be configured using the following environment variables: 1. 
`REDIS_TLS` & `REDIS_PERSECOND_TLS`: set to `"true"` to enable a TLS connection for the specific connection type. +1. `REDIS_TLS_CLIENT_CERT`, `REDIS_TLS_CLIENT_KEY`, and `REDIS_TLS_CACERT` to provide files that specify a TLS connection configuration to a Redis server that requires client certificate verification. (This is effective when `REDIS_TLS` or `REDIS_PERSECOND_TLS` is set to `"true"`). 1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable authentication to the redis host. 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys diff --git a/src/settings/settings.go b/src/settings/settings.go index fa724330..a271cc07 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -2,6 +2,9 @@ package settings import ( "crypto/tls" + "crypto/x509" + "fmt" + "os" "time" "github.com/kelseyhightower/envconfig" @@ -67,6 +70,10 @@ type Settings struct { RedisTls bool `envconfig:"REDIS_TLS" default:"false"` // TODO: Make this setting configurable out of the box instead of having to provide it through code. RedisTlsConfig *tls.Config + // Allow to set the client certificate and key for TLS connections. + RedisTlsClientCert string `envconfig:"REDIS_TLS_CLIENT_CERT" default:""` + RedisTlsClientKey string `envconfig:"REDIS_TLS_CLIENT_KEY" default:""` + RedisTlsCACert string `envconfig:"REDIS_TLS_CACERT" default:""` // RedisPipelineWindow sets the duration after which internal pipelines will be flushed. // If window is zero then implicit pipelining will be disabled. Radix use 150us for the @@ -109,15 +116,18 @@ type Option func(*Settings) func NewSettings() Settings { var s Settings - err := envconfig.Process("", &s) + if err := envconfig.Process("", &s); err != nil { + panic(err) + } // Golang copy-by-value causes the RootCAs to no longer be nil // which isn't the expected default behavior of continuing to use system roots // so let's just initialize to what we want the correct value to be. s.RedisTlsConfig = &tls.Config{} - if err != nil { - panic(err) + // When we need to connect using TLS, we check whether to use the provided key pair. + if s.RedisTls || s.RedisPerSecondTls { + TlsConfigFromFiles(s.RedisTlsClientCert, s.RedisTlsClientKey, s.RedisTlsCACert)(&s) } return s @@ -128,3 +138,35 @@ func GrpcUnaryInterceptor(i grpc.UnaryServerInterceptor) Option { s.GrpcUnaryInterceptor = grpc.UnaryInterceptor(i) } } + +// TlsConfigFromFiles sets the TLS config from the provided files.
+func TlsConfigFromFiles(cert, key, caCert string) Option {
+	return func(s *Settings) {
+		if s.RedisTlsConfig == nil {
+			s.RedisTlsConfig = new(tls.Config)
+		}
+		if cert != "" && key != "" {
+			clientCert, err := tls.LoadX509KeyPair(cert, key)
+			if err != nil {
+				panic(fmt.Errorf("failed to load client TLS key pair: %w", err))
+			}
+			s.RedisTlsConfig.Certificates = append(s.RedisTlsConfig.Certificates, clientCert)
+		}
+
+		if caCert != "" {
+			certPool := x509.NewCertPool()
+			if !certPool.AppendCertsFromPEM(mustReadFile(caCert)) {
+				panic("failed to load the provided TLS CA certificate")
+			}
+			s.RedisTlsConfig.RootCAs = certPool
+		}
+	}
+}
+
+func mustReadFile(name string) []byte {
+	b, err := os.ReadFile(name)
+	if err != nil {
+		panic(fmt.Errorf("failed to read file: %s: %w", name, err))
+	}
+	return b
+}
diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go
index 66b6d1b6..4bf732bf 100644
--- a/test/integration/integration_test.go
+++ b/test/integration/integration_test.go
@@ -9,6 +9,7 @@ import (
 	"io"
 	"math/rand"
 	"os"
+	"path/filepath"
 	"strconv"
 	"testing"
 	"time"
@@ -26,6 +27,8 @@ import (
 	"github.com/envoyproxy/ratelimit/test/common"
 )
 
+var projectDir = os.Getenv("PROJECT_DIR")
+
 func init() {
 	os.Setenv("USE_STATSD", "false")
 	// Memcache does async increments, which can cause race conditions during
@@ -137,6 +140,9 @@ func TestBasicTLSConfig(t *testing.T) {
 	t.Run("WithPerSecondRedisTLS", testBasicConfigAuthTLS(true, 0))
 	t.Run("WithoutPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS(false, 1000))
 	t.Run("WithPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS(true, 1000))
+
+	// Test using client cert.
+	t.Run("WithoutPerSecondRedisTLSWithClientCert", testBasicConfigAuthTLSWithClientCert(false, 0))
 }
 
 func TestBasicAuthConfig(t *testing.T) {
@@ -234,6 +240,20 @@ func testBasicConfigAuthTLS(perSecond bool, local_cache_size int) func(*testing.
 	return testBasicBaseConfig(s)
 }
 
+func testBasicConfigAuthTLSWithClientCert(perSecond bool, local_cache_size int) func(*testing.T) {
+	// "16361" is the port of the redis server running behind stunnel with verify level 2
+	// (verify level 2 checks the peer certificate against the defined CA certificate (CAfile)).
+	// See: Makefile#REDIS_VERIFY_PEER_STUNNEL.
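+	// For reference, the stunnel listener behind this port is generated from the
+	// Makefile's REDIS_VERIFY_PEER_STUNNEL definition, roughly:
+	//
+	//   CAfile = cert.pem
+	//   accept = 127.0.0.1:16361
+	//   connect = 127.0.0.1:6361
+	//
+	// with a "verify = 2" directive assumed for peer certificate verification.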
+	s := makeSimpleRedisSettings(16361, 16382, perSecond, local_cache_size)
+	settings.TlsConfigFromFiles(filepath.Join(projectDir, "cert.pem"), filepath.Join(projectDir, "key.pem"), filepath.Join(projectDir, "cert.pem"))(&s)
+	s.RedisAuth = "password123"
+	s.RedisTls = true
+	s.RedisPerSecondAuth = "password123"
+	s.RedisPerSecondTls = true
+
+	return testBasicBaseConfig(s)
+}
+
 func testBasicConfig(s settings.Settings) func(*testing.T) {
 	return testBasicBaseConfig(s)
 }

From c37fa4547c398613c6b4fe0312637fbf7f37769b Mon Sep 17 00:00:00 2001
From: Ethern Su
Date: Thu, 12 May 2022 23:26:43 +0800
Subject: [PATCH 037/181] Implemented OpenTelemetry Instrumentation (#332)

Signed-off-by: Ethern Su
---
 README.md                           |  23 ++
 examples/otlp-collector/config.yaml |  31 +++
 go.mod                              |  38 ++-
 go.sum                              | 402 ++++++++++++++++++++++++++--
 src/client_cmd/main.go              |  63 ++++-
 src/memcached/cache_impl.go         |  14 +
 src/redis/fixed_cache_impl.go       |  15 ++
 src/server/server_impl.go           |  29 +-
 src/service/ratelimit.go            |  15 ++
 src/service_cmd/runner/runner.go    |  12 +
 src/settings/settings.go            |  14 +-
 src/trace/trace.go                  |  99 +++++++
 test/memcached/cache_impl_test.go   |  79 ++++--
 test/redis/fixed_cache_impl_test.go |  77 ++++--
 test/server/server_impl_test.go     |  11 +-
 test/service/ratelimit_test.go      | 120 ++++++---
 16 files changed, 923 insertions(+), 119 deletions(-)
 create mode 100644 examples/otlp-collector/config.yaml
 create mode 100644 src/trace/trace.go

diff --git a/README.md b/README.md
index 3416ceac..c8616cfa 100644
--- a/README.md
+++ b/README.md
@@ -45,6 +45,7 @@
 - [Health Checking for Redis Active Connection](#health-checking-for-redis-active-connection)
 - [Memcache](#memcache)
 - [Custom headers](#custom-headers)
+- [Tracing](#tracing)
 - [Contact](#contact)
@@ -752,6 +753,28 @@ The following environment variables control the custom response feature:
 1. `LIMIT_REMAINING_HEADER` - The default value is "RateLimit-Remaining", setting the environment variable will specify an alternative header name
 1. `LIMIT_RESET_HEADER` - The default value is "RateLimit-Reset", setting the environment variable will specify an alternative header name
+
+# Tracing
+
+Ratelimit supports exporting spans in OTLP format. See [OpenTelemetry](https://opentelemetry.io/) for more information.
+
+You may use the following commands to quickly set up an OpenTelemetry collector together with a Jaeger all-in-one binary for a quickstart:
+
+```bash
+docker run --name otlp -d -p 4318 -p 4317 -v examples/otlp-collector:/tmp/otlp-collector otel/opentelemetry-collector:0.48.0 -- --config /tmp/otlp-collector/config.yaml
+otelcol-contrib --config examples/otlp-collector/config.yaml
+
+docker run -d --name jaeger -p 16686:16686 -p 14250:14250 jaegertracing/all-in-one:1.33
+```
+
+The following environment variables control the tracing feature:
+
+1. `TRACING_ENABLED` - Enables the tracing feature. Only "true" and "false" (default) are allowed in this field.
+1. `TRACING_EXPORTER_PROTOCOL` - Controls the protocol of the exporter in the tracing feature. Only "http" (default) and "grpc" are allowed in this field.
+1. `TRACING_SERVICE_NAME` - Controls the service name that appears in tracing spans. The default value is "RateLimit".
+1. `TRACING_SERVICE_NAMESPACE` - Controls the service namespace that appears in tracing spans. The default value is empty.
+1. `TRACING_SERVICE_INSTANCE_ID` - Controls the service instance id that appears in tracing spans. It is recommended to put the pod name or container name in this field. The default value is a randomly generated version 4 uuid if unspecified.
+1. Other fields are described in the [OTLP Exporter Documentation](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md). These fields need to be correctly configured for the exporter to send spans to the correct destination; see the example after this list.
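+
+For example, to enable tracing and export spans over gRPC to the quickstart collector above, the environment might look like this (a sketch, not shipped configuration; `OTEL_EXPORTER_OTLP_ENDPOINT` is one of the standard fields from the OTLP Exporter Documentation, and 4317 is the collector's OTLP/gRPC port):
+
+```bash
+export TRACING_ENABLED=true
+export TRACING_EXPORTER_PROTOCOL=grpc
+export TRACING_SERVICE_NAME=RateLimit
+export OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
+```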
+
 # Contact
 
 - [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing
diff --git a/examples/otlp-collector/config.yaml b/examples/otlp-collector/config.yaml
new file mode 100644
index 00000000..f14ae1ed
--- /dev/null
+++ b/examples/otlp-collector/config.yaml
@@ -0,0 +1,31 @@
+extensions:
+  health_check:
+  pprof:
+    endpoint: 0.0.0.0:1777
+  zpages:
+    endpoint: 0.0.0.0:55679
+
+receivers:
+  otlp:
+    protocols:
+      grpc:
+      http:
+
+processors:
+  batch:
+
+exporters:
+  logging:
+    logLevel: debug
+  jaeger:
+    endpoint: "localhost:14250"
+    tls:
+      insecure: true
+
+service:
+  pipelines:
+    traces:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [logging, jaeger]
+  extensions: [health_check, pprof, zpages]
diff --git a/go.mod b/go.mod
index 6284a702..5ce4462c 100644
--- a/go.mod
+++ b/go.mod
@@ -6,9 +6,9 @@ require (
 	github.com/alicebob/miniredis/v2 v2.11.4
 	github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
 	github.com/coocood/freecache v1.1.0
-	github.com/envoyproxy/go-control-plane v0.9.7
-	github.com/golang/mock v1.4.1
-	github.com/golang/protobuf v1.4.2
+	github.com/envoyproxy/go-control-plane v0.10.1
+	github.com/golang/mock v1.4.4
+	github.com/golang/protobuf v1.5.2
 	github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141
 	github.com/kavu/go_reuseport v1.2.0
 	github.com/kelseyhightower/envconfig v1.4.0
@@ -16,26 +16,44 @@ require (
 	github.com/lyft/gostats v0.4.0
 	github.com/mediocregopher/radix/v3 v3.5.1
 	github.com/sirupsen/logrus v1.6.0
-	github.com/stretchr/testify v1.5.1
+	github.com/stretchr/testify v1.7.1
 	golang.org/x/net v0.0.0-20220325170049-de3da57026de
-	google.golang.org/grpc v1.27.0
+	google.golang.org/grpc v1.45.0
 	gopkg.in/yaml.v2 v2.3.0
 )
 
+require (
+	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
+	github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3 // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
+)
+
 require (
 	github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect
 	github.com/cespare/xxhash v1.1.0 // indirect
-	github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect
 	github.com/fsnotify/fsnotify v1.4.7 // indirect
+	github.com/google/uuid v1.3.0
 	github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/stretchr/objx v0.2.0 // indirect
 	github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
-	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0
+	go.opentelemetry.io/otel v1.7.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3
+
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.3 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.3 + go.opentelemetry.io/otel/sdk v1.7.0 + go.opentelemetry.io/otel/trace v1.7.0 + go.opentelemetry.io/proto/otlp v0.16.0 // indirect + golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect - google.golang.org/protobuf v1.25.0 // indirect ) diff --git a/go.sum b/go.sum index 7f4ad15e..70f0c04c 100644 --- a/go.sum +++ b/go.sum @@ -1,22 +1,67 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.11.4 h1:GsuyeunTx7EllZBU3/6Ji3dhMQZDpC9rLf1luJ+6M5M= github.com/alicebob/miniredis/v2 v2.11.4/go.mod h1:VL3UDEfAH59bSa7MuHMuFToxkqyHh69s/WUbYlOAuyg= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0= github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 h1:9kRtNpqLHbZVO/NNxhHp2ymxFxsHOe3x2efJGn//Tas= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coocood/freecache v1.1.0 
h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA= github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -24,43 +69,109 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.7 h1:EARl0OvqMoxq/UMgMSCLnXzkaXbxzskluEBlMQCJPms= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYAymNJek76W3mgw= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3 h1:6amM4HsNPOvMLVc2ZnyqrjeQ92YAVWn7T4WBKK87inY= github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kavu/go_reuseport v1.2.0 h1:YO+pt6m5Z3WkVH9DjaDJzoSS/0FO2Q8x3CfObxk/i2E= github.com/kavu/go_reuseport v1.2.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lyft/goruntime v0.2.5 h1:yRmwOXl3Zns3+Z03fDMWt5+p609rfhIErh7HYCayODg= github.com/lyft/goruntime v0.2.5/go.mod h1:8rUh5gwIPQtyIkIXHbLN1j45HOb8cMgDhrw5GA7DF4g= github.com/lyft/gostats v0.4.0 h1:PbRWmwidTPk6Y80S6itBWDa+XVt1hGvqFM88TBJYdOo= @@ -70,6 +181,8 @@ github.com/mediocregopher/radix/v3 v3.5.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= @@ -79,57 +192,300 @@ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0= github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= +go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= +go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= +go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3 h1:nAmg1WgsUXoXf46dJG9eS/AzOcvkCTK4xJSUYpWyHYg= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3 h1:4/UjHWMVVc5VwX/KAtqJOHErKigMCH8NexChMuanb/o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3/go.mod h1:UJmXdiVVBaZ63umRUTwJuCMAV//GCMvDiQwn703/GoY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.3 h1:leYDq5psbM3K4QNcZ2juCj30LjUnvxjuYQj1mkGjXFM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.3/go.mod h1:ycItY/esVj8c0dKgYTOztTERXtPzcfDU/0o8EdwCjoA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.3 h1:ufVuVt/g16GZ/yDOyp+AcCGebGX8u4z7kDRuwEX0DkA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.3/go.mod h1:S18p8VK4KRHHyAg5rH3iUnJUcRvIUg9xwIWtq1MWibM= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= +go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0= +go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= +go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= +go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= +go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.16.0 h1:WHzDWdXUvbc5bG2ObdrGfaNpQz7ft7QN9HHmJlbiB1E= +go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -138,14 +494,30 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
 google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
form") + oltpProtocol := flag.String("oltp_protocol", "", "protocol to use when exporting tracing span, accept http, grpc or empty (disable tracing) as value, please use OLTP environment variables to set endpoint (refer to README.MD)") flag.Parse() flag.VisitAll(func(f *flag.Flag) { fmt.Printf("Flag: --%s=%q\n", f.Name, f.Value) }) - conn, err := grpc.Dial(*dialString, grpc.WithInsecure()) + if *oltpProtocol != "" { + tp := InitTracerProvider(*oltpProtocol) + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + log.Printf("Error shutting down tracer provider: %v", err) + } + }() + } + + conn, err := grpc.Dial(*dialString, + grpc.WithInsecure(), + grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), + grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), + ) if err != nil { fmt.Printf("error connecting: %s\n", err.Error()) os.Exit(1) @@ -84,3 +111,37 @@ func main() { fmt.Printf("response: %s\n", response.String()) } + +// using a simpler setup in this trace provider for simplicity +func InitTracerProvider(protocol string) *sdktrace.TracerProvider { + var client otlptrace.Client + + switch protocol { + case "http": + client = otlptracehttp.NewClient() + case "grpc": + client = otlptracegrpc.NewClient() + default: + fmt.Printf("Invalid otlptrace client protocol: %s", protocol) + panic("Invalid otlptrace client protocol") + } + + exporter, err := otlptrace.New(context.Background(), client) + if err != nil { + log.Fatalf("creating OTLP trace exporter: %v", err) + } + + resource := resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNameKey.String("RateLimitClient"), + ) + + tp := sdktrace.NewTracerProvider( + sdktrace.WithSampler(sdktrace.AlwaysSample()), + sdktrace.WithBatcher(exporter), + sdktrace.WithResource(resource), + ) + otel.SetTracerProvider(tp) + otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) + return tp +} diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 9d00bec7..a79451df 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -22,6 +22,10 @@ import ( "sync" "time" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "github.com/envoyproxy/ratelimit/src/stats" "github.com/coocood/freecache" @@ -40,6 +44,8 @@ import ( "github.com/envoyproxy/ratelimit/src/utils" ) +var tracer = otel.Tracer("memcached.cacheImpl") + type rateLimitMemcacheImpl struct { client Client timeSource utils.TimeSource @@ -88,6 +94,14 @@ func (this *rateLimitMemcacheImpl) DoLimit( keysToGet = append(keysToGet, cacheKey.Key) } + // Generate trace + _, span := tracer.Start(ctx, "Memcached Fetch Execution", + trace.WithAttributes( + attribute.Int("keysToGet length", len(keysToGet)), + ), + ) + defer span.End() + // Now fetch from memcache. 
diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go
index 9d00bec7..a79451df 100644
--- a/src/memcached/cache_impl.go
+++ b/src/memcached/cache_impl.go
@@ -22,6 +22,10 @@ import (
 	"sync"
 	"time"
 
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
 	"github.com/envoyproxy/ratelimit/src/stats"
 
 	"github.com/coocood/freecache"
@@ -40,6 +44,8 @@ import (
 	"github.com/envoyproxy/ratelimit/src/utils"
 )
 
+var tracer = otel.Tracer("memcached.cacheImpl")
+
 type rateLimitMemcacheImpl struct {
 	client Client
 	timeSource utils.TimeSource
@@ -88,6 +94,14 @@ func (this *rateLimitMemcacheImpl) DoLimit(
 		keysToGet = append(keysToGet, cacheKey.Key)
 	}
 
+	// Generate trace
+	_, span := tracer.Start(ctx, "Memcached Fetch Execution",
+		trace.WithAttributes(
+			attribute.Int("keysToGet length", len(keysToGet)),
+		),
+	)
+	defer span.End()
+
 	// Now fetch from memcache.
 	responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors))
diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go
index 26c3902e..a61b319f 100644
--- a/src/redis/fixed_cache_impl.go
+++ b/src/redis/fixed_cache_impl.go
@@ -3,6 +3,10 @@ package redis
 import (
 	"math/rand"
 
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
 	"github.com/envoyproxy/ratelimit/src/stats"
 
 	"github.com/coocood/freecache"
@@ -15,6 +19,8 @@ import (
 	"github.com/envoyproxy/ratelimit/src/utils"
 )
 
+var tracer = otel.Tracer("redis.fixedCacheImpl")
+
 type fixedRateLimitCacheImpl struct {
 	client Client
 	// Optional Client for a dedicated cache of per second limits.
@@ -87,6 +93,15 @@ func (this *fixedRateLimitCacheImpl) DoLimit(
 		}
 	}
 
+	// Generate trace
+	_, span := tracer.Start(ctx, "Redis Pipeline Execution",
+		trace.WithAttributes(
+			attribute.Int("pipeline length", len(pipeline)),
+			attribute.Int("perSecondPipeline length", len(perSecondPipeline)),
+		),
+	)
+	defer span.End()
+
 	if pipeline != nil {
 		checkError(this.client.PipeDo(pipeline))
 	}
diff --git a/src/server/server_impl.go b/src/server/server_impl.go
index bab235ec..1f09003c 100644
--- a/src/server/server_impl.go
+++ b/src/server/server_impl.go
@@ -2,6 +2,7 @@ package server
 import (
 	"bytes"
+	"context"
 	"expvar"
 	"fmt"
 	"io"
@@ -34,8 +35,15 @@ import (
 	"github.com/envoyproxy/ratelimit/src/limiter"
 	"github.com/envoyproxy/ratelimit/src/settings"
+
+	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
 )
 
+var tracer = otel.Tracer("ratelimit server")
+
 type serverDebugListener struct {
 	endpoints map[string]string
 	debugMux  *http.ServeMux
@@ -75,19 +83,29 @@ func NewJsonHandler(svc pb.RateLimitServiceServer) func(http.ResponseWriter, *ht
 	return func(writer http.ResponseWriter, request *http.Request) {
 		var req pb.RateLimitRequest
 
+		ctx := context.Background()
+
 		if err := jsonpb.Unmarshal(request.Body, &req); err != nil {
 			logger.Warnf("error: %s", err.Error())
 			http.Error(writer, err.Error(), http.StatusBadRequest)
 			return
 		}
 
-		resp, err := svc.ShouldRateLimit(nil, &req)
+		resp, err := svc.ShouldRateLimit(ctx, &req)
 		if err != nil {
 			logger.Warnf("error: %s", err.Error())
 			http.Error(writer, err.Error(), http.StatusBadRequest)
 			return
 		}
 
+		// Generate trace
+		_, span := tracer.Start(ctx, "NewJsonHandler Remaining Execution",
+			trace.WithAttributes(
+				attribute.String("response", resp.String()),
+			),
+		)
+		defer span.End()
+
 		logger.Debugf("resp:%s", resp)
 
 		buf := bytes.NewBuffer(nil)
@@ -185,7 +203,14 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc
 		MaxConnectionAgeGrace: s.GrpcMaxConnectionAgeGrace,
 	})
-	ret.grpcServer = grpc.NewServer(s.GrpcUnaryInterceptor, keepaliveOpt)
+	ret.grpcServer = grpc.NewServer(
+		keepaliveOpt,
+		grpc.ChainUnaryInterceptor(
+			s.GrpcUnaryInterceptor, // chain the otel interceptor after the configured interceptor
+			otelgrpc.UnaryServerInterceptor(),
+		),
+		grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor()),
+	)
 
 	// setup listen addresses
 	ret.httpAddress = net.JoinHostPort(s.Host, strconv.Itoa(s.Port))
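grpc.ChainUnaryInterceptor executes interceptors in the order they are passed, so the configured interceptor above wraps the otelgrpc one. A small runnable sketch, with hypothetical "metrics" and "otel" labels, that makes the nesting visible by composing the chain by hand:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
)

// named returns an interceptor that logs when it enters and exits.
func named(name string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		fmt.Println("enter", name)
		resp, err := handler(ctx, req)
		fmt.Println("exit", name)
		return resp, err
	}
}

func main() {
	outer, inner := named("metrics"), named("otel")
	info := &grpc.UnaryServerInfo{FullMethod: "/pb.RateLimitService/ShouldRateLimit"}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		fmt.Println("handler")
		return req, nil
	}
	// Equivalent nesting to grpc.ChainUnaryInterceptor(outer, inner):
	// prints enter metrics, enter otel, handler, exit otel, exit metrics.
	_, _ = outer(context.Background(), nil, info, func(ctx context.Context, req interface{}) (interface{}, error) {
		return inner(ctx, req, info, handler)
	})
}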
"go.opentelemetry.io/otel/trace" + "github.com/envoyproxy/ratelimit/src/settings" "github.com/envoyproxy/ratelimit/src/stats" @@ -24,6 +28,8 @@ import ( "github.com/envoyproxy/ratelimit/src/redis" ) +var tracer = otel.Tracer("ratelimit") + type RateLimitServiceServer interface { pb.RateLimitServiceServer GetCurrentConfig() config.RateLimitConfig @@ -240,6 +246,15 @@ func (this *service) ShouldRateLimit( ctx context.Context, request *pb.RateLimitRequest) (finalResponse *pb.RateLimitResponse, finalError error) { + // Generate trace + _, span := tracer.Start(ctx, "ShouldRateLimit Execution", + trace.WithAttributes( + attribute.String("domain", request.Domain), + attribute.String("request string", request.String()), + ), + ) + defer span.End() + defer func() { err := recover() if err == nil { diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index e6f3ccfd..c72c4982 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -1,6 +1,7 @@ package runner import ( + "context" "io" "math/rand" "net/http" @@ -10,6 +11,7 @@ import ( "github.com/envoyproxy/ratelimit/src/metrics" "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/trace" gostats "github.com/lyft/gostats" @@ -75,6 +77,16 @@ func createLimiter(srv server.Server, s settings.Settings, localCache *freecache func (runner *Runner) Run() { s := runner.settings + if s.TracingEnabled { + tp := trace.InitProductionTraceProvider(s.TracingExporterProtocol, s.TracingServiceName, s.TracingServiceNamespace, s.TracingServiceInstanceId) + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Printf("Error shutting down tracer provider: %v", err) + } + }() + } else { + logger.Infof("Tracing disabled") + } logLevel, err := logger.ParseLevel(s.LogLevel) if err != nil { diff --git a/src/settings/settings.go b/src/settings/settings.go index a271cc07..222bc6f1 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -13,7 +13,8 @@ import ( type Settings struct { // runtime options - GrpcUnaryInterceptor grpc.ServerOption + // This value shall be imported into unary server interceptor in order to enable chaining + GrpcUnaryInterceptor grpc.UnaryServerInterceptor // Server listen address config Host string `envconfig:"HOST" default:"0.0.0.0"` Port int `envconfig:"PORT" default:"8080"` @@ -110,6 +111,15 @@ type Settings struct { // Should the ratelimiting be running in Global shadow-mode, ie. never report a ratelimit status, unless a rate was provided from envoy as an override GlobalShadowMode bool `envconfig:"SHADOW_MODE" default:"false"` + + // OTLP trace settings + TracingEnabled bool `envconfig:"TRACING_ENABLED" default:"false"` + TracingServiceName string `envconfig:"TRACING_SERVICE_NAME" default:"RateLimit"` + TracingServiceNamespace string `envconfig:"TRACING_SERVICE_NAMESPACE" default:""` + TracingServiceInstanceId string `envconfig:"TRACING_SERVICE_INSTANCE_ID" default:""` + // can only be http or gRPC + TracingExporterProtocol string `envconfig:"TRACING_EXPORTER_PROTOCOL" default:"http"` + // detailed setting of exporter should refer to https://opentelemetry.io/docs/reference/specification/protocol/exporter/, e.g. 
diff --git a/src/settings/settings.go b/src/settings/settings.go
index a271cc07..222bc6f1 100644
--- a/src/settings/settings.go
+++ b/src/settings/settings.go
@@ -13,7 +13,8 @@ import (
 type Settings struct {
 	// runtime options
-	GrpcUnaryInterceptor grpc.ServerOption
+	// This interceptor is chained with the OpenTelemetry unary server interceptor when the gRPC server is built
+	GrpcUnaryInterceptor grpc.UnaryServerInterceptor
 	// Server listen address config
 	Host string `envconfig:"HOST" default:"0.0.0.0"`
 	Port int    `envconfig:"PORT" default:"8080"`
@@ -110,6 +111,15 @@ type Settings struct {
 	// Should the ratelimiting be running in Global shadow-mode, ie. never report a ratelimit status, unless a rate was provided from envoy as an override
 	GlobalShadowMode bool `envconfig:"SHADOW_MODE" default:"false"`
+
+	// OTLP trace settings
+	TracingEnabled           bool   `envconfig:"TRACING_ENABLED" default:"false"`
+	TracingServiceName       string `envconfig:"TRACING_SERVICE_NAME" default:"RateLimit"`
+	TracingServiceNamespace  string `envconfig:"TRACING_SERVICE_NAMESPACE" default:""`
+	TracingServiceInstanceId string `envconfig:"TRACING_SERVICE_INSTANCE_ID" default:""`
+	// accepts only "http" or "grpc"
+	TracingExporterProtocol string `envconfig:"TRACING_EXPORTER_PROTOCOL" default:"http"`
+	// for detailed exporter settings refer to https://opentelemetry.io/docs/reference/specification/protocol/exporter/, e.g. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT
 }
 
 type Option func(*Settings)
@@ -135,7 +145,7 @@ func NewSettings() Settings {
 
 func GrpcUnaryInterceptor(i grpc.UnaryServerInterceptor) Option {
 	return func(s *Settings) {
-		s.GrpcUnaryInterceptor = grpc.UnaryInterceptor(i)
+		s.GrpcUnaryInterceptor = i
 	}
 }
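The new TRACING_* fields follow the repository's existing envconfig pattern. A reduced sketch, assuming the github.com/kelseyhightower/envconfig package used by this file and trimming the struct to two of the new fields:

package main

import (
	"fmt"
	"log"

	"github.com/kelseyhightower/envconfig"
)

type tracingSettings struct {
	TracingEnabled          bool   `envconfig:"TRACING_ENABLED" default:"false"`
	TracingExporterProtocol string `envconfig:"TRACING_EXPORTER_PROTOCOL" default:"http"`
}

func main() {
	// e.g. run with: TRACING_ENABLED=true TRACING_EXPORTER_PROTOCOL=grpc ./a.out
	var s tracingSettings
	if err := envconfig.Process("", &s); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", s)
}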
diff --git a/src/trace/trace.go b/src/trace/trace.go
new file mode 100644
index 00000000..fed78e7a
--- /dev/null
+++ b/src/trace/trace.go
@@ -0,0 +1,96 @@
+package trace
+
+import (
+	"context"
+	"sync"
+
+	"github.com/google/uuid"
+	logger "github.com/sirupsen/logrus"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/sdk/trace/tracetest"
+	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+)
+
+var (
+	testSpanExporter   *tracetest.InMemoryExporter
+	testSpanExporterMu sync.Mutex
+)
+
+func InitProductionTraceProvider(protocol string, serviceName string, serviceNamespace string, serviceInstanceId string) *sdktrace.TracerProvider {
+	client := createClient(protocol)
+	exporter, err := otlptrace.New(context.Background(), client)
+	if err != nil {
+		logger.Fatalf("creating OTLP trace exporter: %v", err)
+	}
+
+	var useServiceInstanceId string
+	if serviceInstanceId == "" {
+		intUuid, err := uuid.NewRandom()
+		if err != nil {
+			logger.Fatalf("generating random uuid for trace exporter: %v", err)
+		}
+		useServiceInstanceId = intUuid.String()
+	} else {
+		useServiceInstanceId = serviceInstanceId
+	}
+
+	resource := resource.NewWithAttributes(
+		semconv.SchemaURL,
+		semconv.ServiceNameKey.String(serviceName),
+		semconv.ServiceNamespaceKey.String(serviceNamespace),
+		semconv.ServiceInstanceIDKey.String(useServiceInstanceId),
+	)
+
+	tp := sdktrace.NewTracerProvider(
+		sdktrace.WithSampler(sdktrace.AlwaysSample()),
+		sdktrace.WithBatcher(exporter),
+		sdktrace.WithResource(resource),
+	)
+	otel.SetTracerProvider(tp)
+	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
+	logger.Infof("TracerProvider initialized with the following parameters: protocol: %s, serviceName: %s, serviceNamespace: %s, serviceInstanceId: %s", protocol, serviceName, serviceNamespace, useServiceInstanceId)
+	return tp
+}
+
+func createClient(protocol string) (client otlptrace.Client) {
+	// the endpoint is set implicitly through environment variables; refer to https://opentelemetry.io/docs/reference/specification/protocol/exporter/
+	switch protocol {
+	case "http", "":
+		client = otlptracehttp.NewClient()
+	case "grpc":
+		client = otlptracegrpc.NewClient()
+	default:
+		logger.Fatalf("Invalid otlptrace client protocol: %s", protocol)
+		panic("Invalid otlptrace client protocol")
+	}
+	return
+}
+
+// GetTestSpanExporter returns the initialized inMemoryExporter if it already exists. If not, it initializes an inMemoryExporter plus a trace provider that uses it, and binds the otel package to that trace provider. It is designed solely for testing.
+// Note: only call this function once in each test package, and assign the returned exporter to a package-level variable
+func GetTestSpanExporter() *tracetest.InMemoryExporter {
+	testSpanExporterMu.Lock()
+	defer testSpanExporterMu.Unlock()
+	if testSpanExporter != nil {
+		return testSpanExporter
+	}
+	// init a new InMemoryExporter and share it with the entire test runtime
+	testSpanExporter = tracetest.NewInMemoryExporter()
+
+	// add the in-memory span exporter to the default OpenTelemetry trace provider
+	tp := sdktrace.NewTracerProvider(
+		sdktrace.WithSampler(sdktrace.AlwaysSample()),
+		// use a syncer instead of a batcher here; its synchronous nature avoids flaky tests
+		sdktrace.WithSyncer(testSpanExporter),
+	)
+	otel.SetTracerProvider(tp)
+	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))
+
+	return testSpanExporter
+}
diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go
index a4640148..14106934 100644
--- a/test/memcached/cache_impl_test.go
+++ b/test/memcached/cache_impl_test.go
@@ -5,6 +5,7 @@ package memcached_test
 import (
+	"context"
 	"math/rand"
 	"strconv"
 	"testing"
@@ -21,6 +22,7 @@ import (
 	"github.com/envoyproxy/ratelimit/src/limiter"
 	"github.com/envoyproxy/ratelimit/src/memcached"
 	"github.com/envoyproxy/ratelimit/src/settings"
+	"github.com/envoyproxy/ratelimit/src/trace"
 	"github.com/envoyproxy/ratelimit/src/utils"
 
 	"github.com/golang/mock/gomock"
@@ -31,6 +33,8 @@ import (
 	mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils"
 )
 
+var testSpanExporter = trace.GetTestSpanExporter()
+
 func TestMemcached(t *testing.T) {
 	assert := assert.New(t)
 	controller := gomock.NewController(t)
@@ -53,7 +57,7 @@ func TestMemcached(t *testing.T) {
 	assert.Equal(
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
-		cache.DoLimit(nil, request, limits))
+		cache.DoLimit(context.Background(), request, limits))
 	assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value())
 	assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
 	assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value())
@@ -80,7 +84,7 @@ func TestMemcached(t *testing.T) {
 			{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
 			{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)},
 		},
-		cache.DoLimit(nil, request, limits))
+		cache.DoLimit(context.Background(), request, limits))
 	assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value())
 	assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value())
 	assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value())
@@ -115,7 +119,7 @@ func TestMemcached(t *testing.T) {
 			{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
 			{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)},
 		},
-		cache.DoLimit(nil, request, limits))
+		cache.DoLimit(context.Background(), request, limits))
 	assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value())
 	assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value())
 	assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value())
@@ -150,7 +154,7 @@ func
TestMemcachedGetError(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -168,7 +172,7 @@ func TestMemcachedGetError(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -244,7 +248,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -265,7 +269,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -286,7 +290,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -304,7 +308,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -345,7 +349,7 @@ func TestNearLimit(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: 
utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -362,7 +366,7 @@ func TestNearLimit(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) @@ -380,7 +384,7 @@ func TestNearLimit(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) @@ -399,7 +403,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -417,7 +421,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) @@ -435,7 +439,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(3), limits[0].Stats.NearLimit.Value()) @@ -453,7 +457,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), 
limits[0].Stats.NearLimit.Value()) @@ -471,7 +475,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(7), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(4), limits[0].Stats.NearLimit.Value()) @@ -489,7 +493,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(3), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -533,7 +537,7 @@ func TestMemcacheWithJitter(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -576,7 +580,7 @@ func TestMemcacheAdd(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -600,7 +604,7 @@ func TestMemcacheAdd(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -648,6 +652,39 @@ func TestNewRateLimitCacheImplFromSettingsWhenHostAndPortAndSrvAreBothSet(t *tes }) } +func TestMemcachedTracer(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + testSpanExporter.Reset() + + timeSource := mock_utils.NewMockTimeSource(controller) + client := mock_memcached.NewMockClient(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := mockstats.NewMockStatManager(statsStore) + + cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, sm, 0.8, "") + + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return( + getMultiResult(map[string]int{"domain_key_value_1234": 
4}), nil, + ) + client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) + + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + + cache.DoLimit(context.Background(), request, limits) + + spanStubs := testSpanExporter.GetSpans() + assert.NotNil(spanStubs) + assert.Len(spanStubs, 1) + assert.Equal(spanStubs[0].Name, "Memcached Fetch Execution") + + cache.Flush() +} + func getMultiResult(vals map[string]int) map[string]*memcache.Item { result := make(map[string]*memcache.Item, len(vals)) for k, v := range vals { diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index e28da4b5..8933d200 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -1,6 +1,7 @@ package redis_test import ( + "context" "math/rand" "testing" @@ -15,6 +16,7 @@ import ( "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/trace" "github.com/envoyproxy/ratelimit/src/utils" "github.com/golang/mock/gomock" @@ -25,6 +27,8 @@ import ( mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" ) +var testSpanExporter = trace.GetTestSpanExporter() + func TestRedis(t *testing.T) { t.Run("WithoutPerSecondRedis", testRedis(false)) t.Run("WithPerSecondRedis", testRedis(true)) @@ -69,7 +73,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -97,7 +101,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) @@ -128,7 +132,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -207,7 +211,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: 
limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -228,7 +232,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -249,7 +253,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -268,7 +272,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -307,7 +311,7 @@ func TestNearLimit(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -324,7 +328,7 @@ func TestNearLimit(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) @@ -342,7 +346,7 @@ func TestNearLimit(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), 
limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) @@ -360,7 +364,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -377,7 +381,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) @@ -394,7 +398,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(3), limits[0].Stats.NearLimit.Value()) @@ -411,7 +415,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) @@ -428,7 +432,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(7), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(4), limits[0].Stats.NearLimit.Value()) @@ -445,7 +449,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(3), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -475,7 +479,7 @@ func TestRedisWithJitter(t *testing.T) { assert.Equal( 
[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) @@ -513,7 +517,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -534,7 +538,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -556,7 +560,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) @@ -577,7 +581,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, - cache.DoLimit(nil, request, limits)) + cache.DoLimit(context.Background(), request, limits)) // TODO: How should we handle statistics? Should there be a separate ShadowMode statistics? Should the other Stats remain as if they were unaffected by shadowmode? assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -588,3 +592,34 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { // Check the local cache stats. 
testLocalCacheStats(localCacheStats, statsStore, sink, 1, 3, 4, 0, 1) } + +func TestRedisTracer(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + testSpanExporter.Reset() + + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sm := stats.NewMockStatManager(statsStore) + + client := mock_redis.NewMockClient(controller) + + timeSource := mock_utils.NewMockTimeSource(controller) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm) + + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + cache.DoLimit(context.Background(), request, limits) + + spanStubs := testSpanExporter.GetSpans() + assert.NotNil(spanStubs) + assert.Len(spanStubs, 1) + assert.Equal(spanStubs[0].Name, "Redis Pipeline Execution") +} diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go index 19a8e59f..235ba2a5 100644 --- a/test/server/server_impl_test.go +++ b/test/server/server_impl_test.go @@ -1,6 +1,7 @@ package server_test import ( + "context" "fmt" "io/ioutil" "net/http" @@ -60,25 +61,25 @@ func TestJsonHandler(t *testing.T) { assertHttpResponse(t, handler, "}", 400, "text/plain; charset=utf-8", "invalid character '}' looking for beginning of value\n") // Unknown response code - rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(&pb.RateLimitResponse{}, nil) + rls.EXPECT().ShouldRateLimit(context.Background(), requestMatcher).Return(&pb.RateLimitResponse{}, nil) assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "application/json", "{}") // ratelimit service error - rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(nil, fmt.Errorf("some error")) + rls.EXPECT().ShouldRateLimit(context.Background(), requestMatcher).Return(nil, fmt.Errorf("some error")) assertHttpResponse(t, handler, `{"domain": "foo"}`, 400, "text/plain; charset=utf-8", "some error\n") // json unmarshaling error - rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(nil, nil) + rls.EXPECT().ShouldRateLimit(context.Background(), requestMatcher).Return(nil, nil) assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "text/plain; charset=utf-8", "error marshaling proto3 to json: Marshal called with nil\n") // successful request, not rate limited - rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(&pb.RateLimitResponse{ + rls.EXPECT().ShouldRateLimit(context.Background(), requestMatcher).Return(&pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, }, nil) assertHttpResponse(t, handler, `{"domain": "foo"}`, 200, "application/json", `{"overallCode":"OK"}`) // successful request, rate limited - rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(&pb.RateLimitResponse{ + rls.EXPECT().ShouldRateLimit(context.Background(), requestMatcher).Return(&pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OVER_LIMIT, }, nil) assertHttpResponse(t, handler, `{"domain": "foo"}`, 429, "application/json", 
`{"overallCode":"OVER_LIMIT"}`) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index ead86ab6..86fed5e9 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -17,6 +17,8 @@ import ( "github.com/stretchr/testify/assert" "golang.org/x/net/context" + "github.com/envoyproxy/ratelimit/src/trace" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" ratelimit "github.com/envoyproxy/ratelimit/src/service" @@ -100,9 +102,16 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe this.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Return(this.config) + + // reset exporter before using + testSpanExporter.Reset() + return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statsManager, true, MockClock{now: int64(2222)}, false) } +// once a ratelimit service is initiated, the package always fetches a default tracer from otel runtime and it can't be change until a new round of test is run. It is necessary to keep a package level exporter in this test package in order to correctly run the tests. +var testSpanExporter = trace.GetTestSpanExporter() + func TestService(test *testing.T) { t := commonSetup(test) defer t.controller.Finish() @@ -110,11 +119,11 @@ func TestService(test *testing.T) { // First request, config should be loaded. request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) - t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(nil) - t.cache.EXPECT().DoLimit(nil, request, []*config.RateLimit{nil}).Return( + t.config.EXPECT().GetLimit(context.Background(), "test-domain", request.Descriptors[0]).Return(nil) + t.cache.EXPECT().DoLimit(context.Background(), request, []*config.RateLimit{nil}).Return( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) common.AssertProtoEqual( t.assert, &pb.RateLimitResponse{ @@ -139,14 +148,14 @@ func TestService(test *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), nil, } - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) - t.cache.EXPECT().DoLimit(nil, request, limits).Return( + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, }) - response, err = service.ShouldRateLimit(nil, request) + response, err = service.ShouldRateLimit(context.Background(), request) common.AssertProtoEqual( t.assert, &pb.RateLimitResponse{ @@ -174,14 +183,14 @@ func TestService(test *testing.T) { nil, config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), } - t.config.EXPECT().GetLimit(nil, "different-domain", 
request.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) - t.cache.EXPECT().DoLimit(nil, request, limits).Return( + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, }) - response, err = service.ShouldRateLimit(nil, request) + response, err = service.ShouldRateLimit(context.Background(), request) common.AssertProtoEqual( t.assert, &pb.RateLimitResponse{ @@ -228,14 +237,14 @@ func TestServiceGlobalShadowMode(test *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), nil, } - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) - t.cache.EXPECT().DoLimit(nil, request, limits).Return( + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, }) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) // OK overall code even if limit response was OVER_LIMIT common.AssertProtoEqual( @@ -268,14 +277,14 @@ func TestRuleShadowMode(test *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), } - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) - t.cache.EXPECT().DoLimit(nil, request, limits).Return( + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, }) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) t.assert.Equal( &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, @@ -301,20 +310,20 @@ func TestMixedRuleShadowMode(test *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, 
t.statsManager.NewStats("key"), false, false), } - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) testResults := []pb.RateLimitResponse_Code{pb.RateLimitResponse_OVER_LIMIT, pb.RateLimitResponse_OVER_LIMIT} for i := 0; i < len(limits); i++ { if limits[i].ShadowMode { testResults[i] = pb.RateLimitResponse_OK } } - t.cache.EXPECT().DoLimit(nil, request, limits).Return( + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( []*pb.RateLimitResponse_DescriptorStatus{ {Code: testResults[0], CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: testResults[1], CurrentLimit: nil, LimitRemaining: 0}, }) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) t.assert.Equal( &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OVER_LIMIT, @@ -360,15 +369,15 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), nil, } - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) - t.cache.EXPECT().DoLimit(nil, request, limits).Return( + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, }) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) common.AssertProtoEqual( t.assert, &pb.RateLimitResponse{ @@ -412,15 +421,15 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), nil, } - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1]) - t.cache.EXPECT().DoLimit(nil, request, limits).Return( + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, }) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) common.AssertProtoEqual( t.assert, &pb.RateLimitResponse{ @@ -445,7 +454,7 @@ func TestEmptyDomain(test 
*testing.T) { service := t.setupBasicService() request := common.NewRateLimitRequest("", [][][2]string{{{"hello", "world"}}}, 1) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) t.assert.Nil(response) t.assert.Equal("rate limit domain must not be empty", err.Error()) t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) @@ -457,7 +466,7 @@ func TestEmptyDescriptors(test *testing.T) { service := t.setupBasicService() request := common.NewRateLimitRequest("test-domain", [][][2]string{}, 1) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) t.assert.Nil(response) t.assert.Equal("rate limit descriptor list must not be empty", err.Error()) t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) @@ -470,13 +479,13 @@ func TestCacheError(test *testing.T) { request := common.NewRateLimitRequest("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false)} - t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) - t.cache.EXPECT().DoLimit(nil, request, limits).Do( + t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { panic(redis.RedisError("cache error")) }) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) t.assert.Nil(response) t.assert.Equal("cache error", err.Error()) t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.redis_error").Value()) @@ -499,7 +508,7 @@ func TestInitialLoadError(test *testing.T) { service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true, t.mockClock, false) request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) t.assert.Nil(response) t.assert.Equal("no rate limit configuration loaded", err.Error()) t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) @@ -517,20 +526,20 @@ func TestUnlimited(test *testing.T) { nil, config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false), } - t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[0]).Return(limits[0]) - t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[1]).Return(limits[1]) - t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[2]).Return(limits[2]) + t.config.EXPECT().GetLimit(context.Background(), "some-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "some-domain", request.Descriptors[1]).Return(limits[1]) + t.config.EXPECT().GetLimit(context.Background(), "some-domain", request.Descriptors[2]).Return(limits[2]) // Unlimited descriptors should not hit the cache expectedCacheLimits := []*config.RateLimit{limits[0], nil, nil} - t.cache.EXPECT().DoLimit(nil, request, 
expectedCacheLimits).Return([]*pb.RateLimitResponse_DescriptorStatus{ + t.cache.EXPECT().DoLimit(context.Background(), request, expectedCacheLimits).Return([]*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, }) - response, err := service.ShouldRateLimit(nil, request) + response, err := service.ShouldRateLimit(context.Background(), request) common.AssertProtoEqual( t.assert, &pb.RateLimitResponse{ @@ -544,3 +553,30 @@ func TestUnlimited(test *testing.T) { response) t.assert.Nil(err) } + +func TestServiceTracer(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + service := t.setupBasicService() + + // First request, config should be loaded. + request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) + t.config.EXPECT().GetLimit(context.Background(), "test-domain", request.Descriptors[0]).Return(nil) + t.cache.EXPECT().DoLimit(context.Background(), request, []*config.RateLimit{nil}).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) + + response, err := service.ShouldRateLimit(context.Background(), request) + common.AssertProtoEqual( + t.assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}, + }, + response) + t.assert.Nil(err) + + spanStubs := testSpanExporter.GetSpans() + t.assert.NotNil(spanStubs) + t.assert.Len(spanStubs, 1) + t.assert.Equal(spanStubs[0].Name, "ShouldRateLimit Execution") +} From 548acf0f0014abc526c28a7e45ea595c0f8e8d89 Mon Sep 17 00:00:00 2001 From: Harsimran Singh Maan Date: Fri, 20 May 2022 13:03:46 -0700 Subject: [PATCH 038/181] Add support for mTLS between envoy and the rate limiter (#336) See the README.md/mTLS for the setup instructions Refactors the TlsConfigFromFiles method into utils to allow reuse across client, server and integration test packages mTLS is optional and is turned off by default --- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- Dockerfile | 2 +- Dockerfile.integration | 2 +- README.md | 40 +++++++- go.mod | 2 +- go.sum | 4 - src/client_cmd/main.go | 22 ++++- src/server/server_impl.go | 15 ++- src/server/tls.go | 34 +++++++ src/settings/settings.go | 82 ++++++++-------- src/utils/tls.go | 52 ++++++++++ test/integration/integration_test.go | 38 +++++++- test/integration/mtls_test.go | 136 +++++++++++++++++++++++++++ 14 files changed, 368 insertions(+), 65 deletions(-) create mode 100644 src/server/tls.go create mode 100644 src/utils/tls.go create mode 100644 test/integration/mtls_test.go diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 48bf0536..10fd7468 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -46,7 +46,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.18" - name: run pre-commits run: | diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 164a61a4..527b4523 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -34,7 +34,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.18" - name: run pre-commits run: | diff --git a/Dockerfile 
b/Dockerfile index 02d8ed30..36564b89 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17 AS build +FROM golang:1.18 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index 86120108..f1ef81ff 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang:1.17 +FROM golang:1.18 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* diff --git a/README.md b/README.md index c8616cfa..01b598d0 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,7 @@ - [Memcache](#memcache) - [Custom headers](#custom-headers) - [Tracing](#tracing) +- [mTLS](#mtls) - [Contact](#contact) @@ -764,9 +765,9 @@ docker run -d --name jaeger -p 16686:16686 -p 14250:14250 jaegertracing/all-in-o # Tracing -Ratelimit supports exporting spans in OLTP format. See [OpenTelemetry](https://opentelemetry.io/) for more information. +Ratelimit service supports exporting spans in OLTP format. See [OpenTelemetry](https://opentelemetry.io/) for more information. -Theh following environment variables control the tracing feature: +The following environment variables control the tracing feature: 1. `TRACING_ENABLED` - Enables the tracing feature. Only "true" and "false"(default) are allowed in this field. 1. `TRACING_EXPORTER_PROTOCOL` - Controls the protocol of exporter in tracing feature. Only "http"(default) and "grpc" are allowed in this field. @@ -775,6 +776,41 @@ Theh following environment variables control the tracing feature: 1. `TRACING_SERVICE_INSTANCE_ID` - Controls the service instance id appears in tracing span. It is recommended to put the pod name or container name in this field. The default value is a randomly generated version 4 uuid if unspecified. 1. Other fields in [OTLP Exporter Documentation](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md). These section needs to be correctly configured in order to enable the exporter to export span to the correct destination. +# mTLS + +Ratelimit supports mTLS when Envoy sends requests to the service. + +The following environment variables control the mTLS feature: + +All of them are set on the Ratelimit service itself. + +1. `GRPC_SERVER_USE_TLS` - Enables gRPC connections to the server over TLS +1. `GRPC_SERVER_TLS_CERT` - Path to the file containing the server cert chain +1. `GRPC_SERVER_TLS_KEY` - Path to the file containing the server private key +1. `GRPC_CLIENT_TLS_CACERT` - Path to the file containing the client CA certificate. +1.
`GRPC_CLIENT_TLS_SAN` - (Optional) DNS Name to validate from the client cert during mTLS auth + +In the envoy config use, add the `transport_socket` section to the ratelimit service cluster config + +```yaml +"name": "ratelimit" +"transport_socket": + "name": "envoy.transport_sockets.tls" + "typed_config": + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" + "common_tls_context": + "tls_certificates": + - "certificate_chain": + "filename": "/opt/envoy/tls/ratelimit-client-cert.pem" + "private_key": + "filename": "/opt/envoy/tls/ratelimit-client-key.pem" + "validation_context": + "match_subject_alt_names": + - "exact": "ratelimit.server.dnsname" + "trusted_ca": + "filename": "/opt/envoy/tls/ratelimit-server-ca.pem" +``` + # Contact - [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing diff --git a/go.mod b/go.mod index 5ce4462c..5ce950c3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/envoyproxy/ratelimit -go 1.17 +go 1.18 require ( github.com/alicebob/miniredis/v2 v2.11.4 diff --git a/go.sum b/go.sum index 70f0c04c..61bd52b2 100644 --- a/go.sum +++ b/go.sum @@ -151,7 +151,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= @@ -350,12 +349,9 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/src/client_cmd/main.go b/src/client_cmd/main.go index 7578a5bf..57a25e50 100644 --- a/src/client_cmd/main.go +++ b/src/client_cmd/main.go @@ -12,6 +12,9 @@ import ( pb 
"github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/envoyproxy/ratelimit/src/utils" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -65,7 +68,11 @@ func main() { flag.Var( &descriptorsValue, "descriptors", "descriptor list to query in =,=,... form") - oltpProtocol := flag.String("oltp_protocol", "", "protocol to use when exporting tracing span, accept http, grpc or empty (disable tracing) as value, please use OLTP environment variables to set endpoint (refer to README.MD)") + oltpProtocol := flag.String("oltp-protocol", "", "protocol to use when exporting tracing span, accept http, grpc or empty (disable tracing) as value, please use OLTP environment variables to set endpoint (refer to README.MD)") + grpcServerTlsCACert := flag.String("grpc-server-ca-file", "", "path to the server CA file for TLS connection") + grpcUseTLS := flag.Bool("grpc-use-tls", false, "Use TLS for connection to server") + grpcTlsCertFile := flag.String("grpc-cert-file", "", "path to the client cert file for TLS connection") + grpcTlsKeyFile := flag.String("grpc-key-file", "", "path to the client key for TLS connection") flag.Parse() flag.VisitAll(func(f *flag.Flag) { @@ -80,12 +87,17 @@ func main() { } }() } - - conn, err := grpc.Dial(*dialString, - grpc.WithInsecure(), + dialOptions := []grpc.DialOption{ grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), - ) + } + if *grpcUseTLS { + tlsConfig := utils.TlsConfigFromFiles(*grpcTlsCertFile, *grpcTlsKeyFile, *grpcServerTlsCACert, utils.ServerCA) + dialOptions = append(dialOptions, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + } else { + dialOptions = append(dialOptions, grpc.WithInsecure()) + } + conn, err := grpc.Dial(*dialString, dialOptions...) if err != nil { fmt.Printf("error connecting: %s\n", err.Error()) os.Exit(1) diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 1f09003c..8fa31d2d 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -17,6 +17,7 @@ import ( "sync" "syscall" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "github.com/envoyproxy/ratelimit/src/stats" @@ -202,15 +203,23 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc MaxConnectionAge: s.GrpcMaxConnectionAge, MaxConnectionAgeGrace: s.GrpcMaxConnectionAgeGrace, }) - - ret.grpcServer = grpc.NewServer( + grpcOptions := []grpc.ServerOption{ keepaliveOpt, grpc.ChainUnaryInterceptor( s.GrpcUnaryInterceptor, // chain otel interceptor after the input interceptor otelgrpc.UnaryServerInterceptor(), ), grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor()), - ) + } + if s.GrpcServerUseTLS { + grpcServerTlsConfig := s.GrpcServerTlsConfig + // Verify client SAN if provided + if s.GrpcClientTlsSAN != "" { + grpcServerTlsConfig.VerifyPeerCertificate = verifyClient(grpcServerTlsConfig.ClientCAs, s.GrpcClientTlsSAN) + } + grpcOptions = append(grpcOptions, grpc.Creds(credentials.NewTLS(grpcServerTlsConfig))) + } + ret.grpcServer = grpc.NewServer(grpcOptions...) 
// setup listen addresses ret.httpAddress = net.JoinHostPort(s.Host, strconv.Itoa(s.Port)) diff --git a/src/server/tls.go b/src/server/tls.go new file mode 100644 index 00000000..e0fc051c --- /dev/null +++ b/src/server/tls.go @@ -0,0 +1,34 @@ +package server + +import ( + "crypto/x509" + "errors" + + logger "github.com/sirupsen/logrus" +) + +func verifyClient(clientCAPool *x509.CertPool, clientSAN string) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + for _, certs := range verifiedChains { + opts := x509.VerifyOptions{ + Roots: clientCAPool, + Intermediates: x509.NewCertPool(), + DNSName: clientSAN, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + if len(certs) < 1 { + return errors.New("missing client cert") + } + // Get intermediates if any + for _, cert := range certs[1:] { + opts.Intermediates.AddCert(cert) + } + _, err := certs[0].Verify(opts) + if err != nil { + logger.Warnf("error validating client: %s", err.Error()) + return err + } + } + return nil + } +} diff --git a/src/settings/settings.go b/src/settings/settings.go index 222bc6f1..c88c186e 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -2,13 +2,12 @@ package settings import ( "crypto/tls" - "crypto/x509" - "fmt" - "os" "time" "github.com/kelseyhightower/envconfig" "google.golang.org/grpc" + + "github.com/envoyproxy/ratelimit/src/utils" ) type Settings struct { @@ -18,18 +17,31 @@ type Settings struct { // Server listen address config Host string `envconfig:"HOST" default:"0.0.0.0"` Port int `envconfig:"PORT" default:"8080"` - GrpcHost string `envconfig:"GRPC_HOST" default:"0.0.0.0"` - GrpcPort int `envconfig:"GRPC_PORT" default:"8081"` DebugHost string `envconfig:"DEBUG_HOST" default:"0.0.0.0"` DebugPort int `envconfig:"DEBUG_PORT" default:"6070"` // GRPC server settings + GrpcHost string `envconfig:"GRPC_HOST" default:"0.0.0.0"` + GrpcPort int `envconfig:"GRPC_PORT" default:"8081"` + // GrpcServerTlsConfig configures grpc for the server + GrpcServerTlsConfig *tls.Config // GrpcMaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. GrpcMaxConnectionAge time.Duration `envconfig:"GRPC_MAX_CONNECTION_AGE" default:"24h" description:"Duration a connection may exist before it will be closed by sending a GoAway."` // GrpcMaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed. GrpcMaxConnectionAgeGrace time.Duration `envconfig:"GRPC_MAX_CONNECTION_AGE_GRACE" default:"1h" description:"Period after MaxConnectionAge after which the connection will be forcibly closed."` - + // GrpcServerUseTLS enables gRPC connections to the server over TLS + GrpcServerUseTLS bool `envconfig:"GRPC_SERVER_USE_TLS" default:"false"` + // Allows setting the server certificate and key for TLS connections. + // GrpcServerTlsCert is the path to the file containing the server cert chain + GrpcServerTlsCert string `envconfig:"GRPC_SERVER_TLS_CERT" default:""` + // GrpcServerTlsKey is the path to the file containing the server private key + GrpcServerTlsKey string `envconfig:"GRPC_SERVER_TLS_KEY" default:""` + // GrpcClientTlsCACert is the path to the file containing the client CA certificate.
+ // Use for validating client certificate + GrpcClientTlsCACert string `envconfig:"GRPC_CLIENT_TLS_CACERT" default:""` + // GrpcClientTlsSAN is the SAN to validate from the client cert during mTLS auth + GrpcClientTlsSAN string `envconfig:"GRPC_CLIENT_TLS_SAN" default:""` // Logging settings LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` LogFormat string `envconfig:"LOG_FORMAT" default:"text"` @@ -129,54 +141,40 @@ func NewSettings() Settings { if err := envconfig.Process("", &s); err != nil { panic(err) } - - // Golang copy-by-value causes the RootCAs to no longer be nil - // which isn't the expected default behavior of continuing to use system roots - // so let's just initialize to what we want the correct value to be. - s.RedisTlsConfig = &tls.Config{} - - // When we require to connect using TLS, we check if we need to connect using the provided key-pair. - if s.RedisTls || s.RedisPerSecondTls { - TlsConfigFromFiles(s.RedisTlsClientCert, s.RedisTlsClientKey, s.RedisTlsCACert)(&s) - } - + // When we require TLS to connect to Redis, we check if we need to connect using the provided key-pair. + RedisTlsConfig(s.RedisTls || s.RedisPerSecondTls)(&s) + GrpcServerTlsConfig()(&s) return s } -func GrpcUnaryInterceptor(i grpc.UnaryServerInterceptor) Option { +func RedisTlsConfig(redisTls bool) Option { return func(s *Settings) { - s.GrpcUnaryInterceptor = i + // Golang copy-by-value causes the RootCAs to no longer be nil + // which isn't the expected default behavior of continuing to use system roots + // so let's just initialize to what we want the correct value to be. + s.RedisTlsConfig = &tls.Config{} + if redisTls { + s.RedisTlsConfig = utils.TlsConfigFromFiles(s.RedisTlsClientCert, s.RedisTlsClientKey, s.RedisTlsCACert, utils.ServerCA) + } } } -// TlsConfigFromFiles sets the TLS config from the provided files. -func TlsConfigFromFiles(cert, key, caCert string) Option { +func GrpcServerTlsConfig() Option { return func(s *Settings) { - if s.RedisTlsConfig == nil { - s.RedisTlsConfig = new(tls.Config) - } - if cert != "" && key != "" { - clientCert, err := tls.LoadX509KeyPair(cert, key) - if err != nil { - panic(fmt.Errorf("failed lo load client TLS key pair: %w", err)) - } - s.RedisTlsConfig.Certificates = append(s.RedisTlsConfig.Certificates, clientCert) - } - - if caCert != "" { - certPool := x509.NewCertPool() - if !certPool.AppendCertsFromPEM(mustReadFile(caCert)) { - panic("failed to load the provided TLS CA certificate") + if s.GrpcServerUseTLS { + grpcServerTlsConfig := utils.TlsConfigFromFiles(s.GrpcServerTlsCert, s.GrpcServerTlsKey, s.GrpcClientTlsCACert, utils.ClientCA) + if s.GrpcClientTlsCACert != "" { + grpcServerTlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + } else { + grpcServerTlsConfig.ClientAuth = tls.NoClientCert } - s.RedisTlsConfig.RootCAs = certPool + s.GrpcServerTlsConfig = grpcServerTlsConfig } } } -func mustReadFile(name string) []byte { - b, err := os.ReadFile(name) - if err != nil { - panic(fmt.Errorf("failed to read file: %s: %w", name, err)) +func GrpcUnaryInterceptor(i grpc.UnaryServerInterceptor) Option { + return func(s *Settings) { + s.GrpcUnaryInterceptor = i } - return b } diff --git a/src/utils/tls.go b/src/utils/tls.go new file mode 100644 index 00000000..76085e22 --- /dev/null +++ b/src/utils/tls.go @@ -0,0 +1,52 @@ +package utils + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "os" +) + +type CAType int + +const ( + ClientCA CAType = iota + ServerCA +) + +// TlsConfigFromFiles sets the TLS config from the provided files. 
+func TlsConfigFromFiles(certFile, keyFile, caCertFile string, caType CAType) *tls.Config { + config := &tls.Config{} + if certFile != "" && keyFile != "" { + tlsKeyPair, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + panic(fmt.Errorf("failed to load TLS key pair (%s,%s): %w", certFile, keyFile, err)) + } + config.Certificates = append(config.Certificates, tlsKeyPair) + } + if caCertFile != "" { + // try to get the SystemCertPool first + certPool, _ := x509.SystemCertPool() + if certPool == nil { + certPool = x509.NewCertPool() + } + if !certPool.AppendCertsFromPEM(mustReadFile(caCertFile)) { + panic(fmt.Errorf("failed to load the provided TLS CA certificate: %s", caCertFile)) + } + switch caType { + case ClientCA: + config.ClientCAs = certPool + case ServerCA: + config.RootCAs = certPool + } + } + return config +} + +func mustReadFile(name string) []byte { + b, err := os.ReadFile(name) + if err != nil { + panic(fmt.Errorf("failed to read file: %s: %w", name, err)) + } + return b +} diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 4bf732bf..597ea1f3 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1,5 +1,4 @@ //go:build integration -// +build integration package integration_test @@ -20,10 +19,12 @@ import ( "github.com/stretchr/testify/assert" "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/src/service_cmd/runner" "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/utils" "github.com/envoyproxy/ratelimit/test/common" ) @@ -229,6 +230,32 @@ func TestMultiNodeMemcache(t *testing.T) { }) } +func Test_mTLS(t *testing.T) { + s := makeSimpleRedisSettings(16381, 16382, false, 0) + s.RedisTlsConfig = &tls.Config{} + s.RedisAuth = "password123" + s.RedisTls = true + s.RedisPerSecondAuth = "password123" + s.RedisPerSecondTls = true + assert := assert.New(t) + serverCAFile, serverCertFile, serverCertKey, err := mTLSSetup(utils.ServerCA) + assert.NoError(err) + clientCAFile, clientCertFile, clientCertKey, err := mTLSSetup(utils.ClientCA) + assert.NoError(err) + s.GrpcServerUseTLS = true + s.GrpcServerTlsCert = serverCertFile + s.GrpcServerTlsKey = serverCertKey + s.GrpcClientTlsCACert = clientCAFile + s.GrpcClientTlsSAN = "localhost" + settings.GrpcServerTlsConfig()(&s) + runner := startTestRunner(t, s) + defer runner.Stop() + clientTlsConfig := utils.TlsConfigFromFiles(clientCertFile, clientCertKey, serverCAFile, utils.ServerCA) + conn, err := grpc.Dial(fmt.Sprintf("localhost:%v", s.GrpcPort), grpc.WithTransportCredentials(credentials.NewTLS(clientTlsConfig))) + assert.NoError(err) + defer conn.Close() +} + func testBasicConfigAuthTLS(perSecond bool, local_cache_size int) func(*testing.T) { s := makeSimpleRedisSettings(16381, 16382, perSecond, local_cache_size) s.RedisTlsConfig = &tls.Config{} @@ -245,11 +272,14 @@ func testBasicConfigAuthTLSWithClientCert(perSecond bool, local_cache_size int) // verifies the peer certificate against the defined CA certificate (CAfile)). // See: Makefile#REDIS_VERIFY_PEER_STUNNEL.
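One note on the refactored helper above: the `caType` argument decides whether the CA pool lands in `RootCAs` (a client validating a server) or `ClientCAs` (a server validating clients), which is what lets a single function serve both ends of the mTLS handshake. A hedged usage sketch, assuming placeholder cert paths (the helper panics if the files are missing):

```go
package main

import (
	"crypto/tls"

	"github.com/envoyproxy/ratelimit/src/utils"
)

func main() {
	// Client side: present a client cert and trust the server's CA
	// (ServerCA routes the pool into RootCAs).
	clientCfg := utils.TlsConfigFromFiles("client-cert.pem", "client-key.pem", "server-ca.pem", utils.ServerCA)

	// Server side: present a server cert and verify clients against the
	// client CA (ClientCA routes the pool into ClientCAs).
	serverCfg := utils.TlsConfigFromFiles("server-cert.pem", "server-key.pem", "client-ca.pem", utils.ClientCA)
	serverCfg.ClientAuth = tls.RequireAndVerifyClientCert // as settings.GrpcServerTlsConfig does

	_, _ = clientCfg, serverCfg
}
```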
s := makeSimpleRedisSettings(16361, 16382, perSecond, local_cache_size) - settings.TlsConfigFromFiles(filepath.Join(projectDir, "cert.pem"), filepath.Join(projectDir, "key.pem"), filepath.Join(projectDir, "cert.pem"))(&s) - s.RedisAuth = "password123" + s.RedisTlsClientCert = filepath.Join(projectDir, "cert.pem") + s.RedisTlsClientKey = filepath.Join(projectDir, "key.pem") + s.RedisTlsCACert = filepath.Join(projectDir, "cert.pem") s.RedisTls = true - s.RedisPerSecondAuth = "password123" s.RedisPerSecondTls = true + settings.RedisTlsConfig(s.RedisTls || s.RedisPerSecondTls)(&s) + s.RedisAuth = "password123" + s.RedisPerSecondAuth = "password123" return testBasicBaseConfig(s) } diff --git a/test/integration/mtls_test.go b/test/integration/mtls_test.go new file mode 100644 index 00000000..59d2e657 --- /dev/null +++ b/test/integration/mtls_test.go @@ -0,0 +1,136 @@ +//go:build integration + +package integration_test + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "os" + "testing" + "time" + + "github.com/envoyproxy/ratelimit/src/utils" +) + +func createCA() (caFileName string, ca *x509.Certificate, pk *rsa.PrivateKey, err error) { + ca = &x509.Certificate{ + SerialNumber: big.NewInt(2022), + Subject: pkix.Name{ + Organization: []string{"Acme CA"}, + Country: []string{"CA"}, + Province: []string{"BC"}, + Locality: []string{"Vancouver"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(1, 0, 0), + IsCA: true, + BasicConstraintsValid: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + } + caPrivKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return "", nil, nil, err + } + // create the CA + caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey) + if err != nil { + return "", nil, nil, err + } + caPEM := new(bytes.Buffer) + pem.Encode(caPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + }) + cafileName, err := writeContentToTempFile(caPEM.Bytes(), "ca.pem") + return cafileName, ca, caPrivKey, err +} + +func writeContentToTempFile(content []byte, filenameprefix string) (filename string, err error) { + f, err := os.CreateTemp("", filenameprefix) + if err != nil { + return "", err + } + _, err = f.Write(content) + if err != nil { + return "", err + } + err = f.Close() + return f.Name(), err +} + +func signCert(caType utils.CAType, ca *x509.Certificate, caPK *rsa.PrivateKey) (certFile string, keyFile string, err error) { + keyUsage := x509.ExtKeyUsageServerAuth + name := "Server" + var sn int64 = 2021 + if caType == utils.ClientCA { + keyUsage = x509.ExtKeyUsageClientAuth + name = "Client" + sn = 2020 + } + cert := &x509.Certificate{ + SerialNumber: big.NewInt(sn), + Subject: pkix.Name{ + Organization: []string{name}, + Country: []string{"CA"}, + Province: []string{"BC"}, + Locality: []string{"Vancouver"}, + }, + DNSNames: []string{"localhost"}, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(0, 5, 0), + ExtKeyUsage: []x509.ExtKeyUsage{keyUsage}, + KeyUsage: x509.KeyUsageDigitalSignature, + } + + certPrivKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return "", "", err + } + certBytes, err := x509.CreateCertificate(rand.Reader, cert, ca, &certPrivKey.PublicKey, caPK) + if err != nil { + return "", "", err + } + certPEM := new(bytes.Buffer) + pem.Encode(certPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: 
certBytes, + }) + certfileName, err := writeContentToTempFile(certPEM.Bytes(), fmt.Sprintf("%s-cert.pem", name)) + if err != nil { + return "", "", err + } + certPrivKeyPEM := new(bytes.Buffer) + pem.Encode(certPrivKeyPEM, &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey), + }) + + pkFileName, err := writeContentToTempFile(certPrivKeyPEM.Bytes(), fmt.Sprintf("%s-key.pem", name)) + if err != nil { + return "", "", err + } + return certfileName, pkFileName, nil +} + +func mTLSSetup(caType utils.CAType) (caFile string, certFile string, keyFile string, err error) { + caFile, serverCA, serverCApk, err := createCA() + if err != nil { + return "", "", "", err + } + certFile, keyFile, err = signCert(caType, serverCA, serverCApk) + return caFile, certFile, keyFile, err +} + +func Test_mTLSSetup(t *testing.T) { + _, _, _, err := mTLSSetup(utils.ServerCA) + if err != nil { + t.Fatal(err) + } +} From 8655817d2bb155bf46b083876ffa0f74d9b3335b Mon Sep 17 00:00:00 2001 From: Andre Ferraz <31141+deferraz@users.noreply.github.com> Date: Thu, 23 Jun 2022 21:27:58 -0300 Subject: [PATCH 039/181] fix typed_config for envoy.filters.http.router (#340) Signed-off-by: Andre Ferraz Co-authored-by: Andre Ferraz --- examples/envoy/mock.yaml | 3 ++- examples/envoy/proxy.yaml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/examples/envoy/mock.yaml b/examples/envoy/mock.yaml index 87574494..e3fe0418 100644 --- a/examples/envoy/mock.yaml +++ b/examples/envoy/mock.yaml @@ -26,7 +26,8 @@ static_resources: inline_string: "Hello World" http_filters: - name: envoy.filters.http.router - typed_config: {} + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router admin: access_log_path: "/dev/null" address: diff --git a/examples/envoy/proxy.yaml b/examples/envoy/proxy.yaml index 1bf48c7e..eb5b1b9f 100644 --- a/examples/envoy/proxy.yaml +++ b/examples/envoy/proxy.yaml @@ -62,7 +62,8 @@ static_resources: cluster_name: ratelimit transport_api_version: V3 - name: envoy.filters.http.router - typed_config: {} + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router route_config: name: route virtual_hosts: From 1f4ea68e5241b0787fe28276ff3d9ad2f6e01b53 Mon Sep 17 00:00:00 2001 From: Bailey Thompson Date: Wed, 13 Jul 2022 11:14:43 -0400 Subject: [PATCH 040/181] Add descriptor replacing (#344) Signed-off-by: Bailey Thompson --- .gitignore | 1 + README.md | 63 +++++++++++++- examples/envoy/proxy.yaml | 42 ++++++++++ examples/ratelimit/config/example.yaml | 35 ++++++++ go.sum | 4 + integration-test/scripts/replaces.sh | 109 +++++++++++++++++++++++++ src/config/config.go | 2 + src/config/config_impl.go | 51 ++++++++++-- src/service/ratelimit.go | 25 +++++- test/config/config_test.go | 22 +++++ test/config/replaces_empty.yaml | 12 +++ test/config/replaces_self.yaml | 12 +++ test/limiter/base_limiter_test.go | 16 ++-- test/memcached/cache_impl_test.go | 36 ++++---- test/redis/bench_test.go | 2 +- test/redis/fixed_cache_impl_test.go | 30 +++---- test/service/ratelimit_test.go | 24 +++--- 17 files changed, 422 insertions(+), 64 deletions(-) create mode 100755 integration-test/scripts/replaces.sh create mode 100644 test/config/replaces_empty.yaml create mode 100644 test/config/replaces_self.yaml diff --git a/.gitignore b/.gitignore index 47bc3e84..37c261b4 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ private.pem redis-per-second.conf redis.conf redis-verify-peer.conf +dump.rdb # Directories created by 
"test_with_redis" make target. 63* diff --git a/README.md b/README.md index 01b598d0..5abd0fa1 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ - [Definitions](#definitions) - [Descriptor list definition](#descriptor-list-definition) - [Rate limit definition](#rate-limit-definition) + - [Replaces](#replaces) - [ShadowMode](#shadowmode) - [Examples](#examples) - [Example 1](#example-1) @@ -21,7 +22,8 @@ - [Example 3](#example-3) - [Example 4](#example-4) - [Example 5](#example-5) - - [Example 6](#example-6) + - [Example 6](#example-6) + - [Example 7](#example-7) - [Loading Configuration](#loading-configuration) - [Log Format](#log-format) - [GRPC Keepalive](#grpc-keepalive) @@ -197,6 +199,9 @@ descriptors: - key: value: rate_limit: (optional block) + name: (optional) + replaces: (optional) + - name: (optional) unit: requests_per_unit: shadow_mode: (optional) @@ -221,6 +226,23 @@ The rate limit block specifies the actual rate limit that will be used when ther Currently the service supports per second, minute, hour, and day limits. More types of limits may be added in the future based on user demand. +### Replaces + +The replaces key indicates that this descriptor will replace the configuration set by another descriptor. + +If there is a rule being evaluated, and multiple descriptors can apply, the replaces descriptor will drop evaluation of +the descriptor which it is replacing. + +To enable this, any descriptor which should potentially be replaced by another should have a name keyword in the +rate_limit section, and any descriptor which should potentially replace the original descriptor should have a name +keyword in its respective replaces section. Whenever limits match to both rules, only the rule which replaces the +original will take effect, and the limit of the original will not be changed after evaluation. + +For example, let's say you have a bunch of endpoints and each is classified under read or write, with read having a +certain limit and write having another. Each user has a certain limit for both endpoints. However, let's say that you +want to increase a user's limit to a single read endpoint. The only option without using replaces would be to increase +their limit for the read category. The replaces keyword allows increasing the limit of a single endpoint in this case. + ### ShadowMode A shadow_mode key in a rule indicates that whatever the outcome of the evaluation of the rule, the end-result will always be "OK". @@ -399,7 +421,7 @@ This can be useful for collecting statistics, or if one wants to define a descri The return value for unlimited descriptors will be an OK status code with the LimitRemaining field set to MaxUint32 value. -### Example 6 +#### Example 6 A rule using shadow_mode is useful for soft-launching rate limiting. In this example @@ -431,6 +453,43 @@ descriptors: unit: second ``` +#### Example 7 + +When the replaces keyword is used, that limit will replace any limit which has the name being replaced as its name, and +the original descriptor's limit will not be affected. 
+ +In the example below, the following limits will apply: + +``` +(key_1, value_1), (user, bkthomps): 5 / sec +(key_2, value_2), (user, bkthomps): 10 / sec +(key_1, value_1), (key_2, value_2), (user, bkthomps): 10 / sec since the (key_1, value_1), (user, bkthomps) rule was replaced and this will not affect the 5 / sec limit that would take effect with (key_2, value_2), (user, bkthomps) +``` + +```yaml +domain: example7 +descriptors: + - key: key_1 + value: value_1 + descriptors: + - key: user + value: bkthomps + rate_limit: + name: specific_limit + requests_per_unit: 5 + unit: second + - key: key_2 + value: value_2 + descriptors: + - key: user + value: bkthomps + rate_limit: + replaces: + - name: specific_limit + requests_per_unit: 10 + unit: second +``` + ## Loading Configuration The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors diff --git a/examples/envoy/proxy.yaml b/examples/envoy/proxy.yaml index eb5b1b9f..a0e2d1e9 100644 --- a/examples/envoy/proxy.yaml +++ b/examples/envoy/proxy.yaml @@ -107,3 +107,45 @@ static_resources: - request_headers: header_name: "baz" descriptor_key: "baz" + - match: + prefix: /fourheader + route: + cluster: mock + rate_limits: + - actions: + - request_headers: + header_name: "foo" + descriptor_key: "foo" + - request_headers: + header_name: "bar" + descriptor_key: "bar" + - actions: + - request_headers: + header_name: "source_cluster" + descriptor_key: "source_cluster" + - request_headers: + header_name: "destination_cluster" + descriptor_key: "destination_cluster" + - match: + prefix: /fiveheader + route: + cluster: mock + rate_limits: + - actions: + - request_headers: + header_name: "foo" + descriptor_key: "foo" + - request_headers: + header_name: "bar" + descriptor_key: "bar" + - actions: + - request_headers: + header_name: "source_cluster" + descriptor_key: "source_cluster" + - request_headers: + header_name: "destination_cluster" + descriptor_key: "destination_cluster" + - actions: + - request_headers: + header_name: "category" + descriptor_key: "category" diff --git a/examples/ratelimit/config/example.yaml b/examples/ratelimit/config/example.yaml index 39fd61f2..52cc8e42 100644 --- a/examples/ratelimit/config/example.yaml +++ b/examples/ratelimit/config/example.yaml @@ -1,14 +1,42 @@ --- domain: rl descriptors: + - key: category + value: account + rate_limit: + replaces: + - name: bkthomps + - name: fake_name + unit: minute + requests_per_unit: 4 - key: source_cluster value: proxy descriptors: + - key: destination_cluster + value: bkthomps + rate_limit: + replaces: + - name: bkthomps + unit: minute + requests_per_unit: 2 - key: destination_cluster value: mock rate_limit: unit: minute requests_per_unit: 1 + - key: destination_cluster + value: override + rate_limit: + replaces: + - name: banned_limit + unit: minute + requests_per_unit: 2 + - key: destination_cluster + value: fake + rate_limit: + name: fake_name + unit: minute + requests_per_unit: 2 - key: foo rate_limit: unit: minute @@ -18,9 +46,16 @@ descriptors: rate_limit: unit: minute requests_per_unit: 3 + - key: bar + value: bkthomps + rate_limit: + name: bkthomps + unit: minute + requests_per_unit: 1 - key: bar value: banned rate_limit: + name: banned_limit unit: minute requests_per_unit: 0 - key: baz diff --git a/go.sum b/go.sum index 61bd52b2..70f0c04c 100644 --- a/go.sum +++ b/go.sum @@ -151,6 +151,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ 
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= @@ -349,9 +350,12 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/integration-test/scripts/replaces.sh b/integration-test/scripts/replaces.sh new file mode 100755 index 00000000..b343d509 --- /dev/null +++ b/integration-test/scripts/replaces.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +function assert_ok() { + if [ $? -ne 0 ]; then + echo "Rate limited the request, but should not have" + exit 1 + fi +} + +function assert_limited() { + if [ $? -eq 0 ]; then + echo "Should have rate limited the request, but it was not" + exit 1 + fi +} + +# +# Verify that replaces increases the limit. 
+# +# descriptor: (foo: *), (bar: banned) +# name: banned_limit +# quota: 0 / min +# +# descriptor: (source_cluster: proxy), (destination_cluster: override) +# replaces: banned_limit +# quota: 2 / min +# + +response=$(curl -f -s -H "foo: bkthomps" -H "bar: banned" http://envoy-proxy:8888/twoheader) +assert_limited + +response=$(curl -f -s -H "foo: bkthomps" -H "bar: banned" -H "source_cluster: proxy" -H "destination_cluster: mock" http://envoy-proxy:8888/fourheader) +assert_limited + +response=$(curl -f -s -H "foo: bkthomps" -H "bar: banned" -H "source_cluster: proxy" -H "destination_cluster: override" http://envoy-proxy:8888/fourheader) +assert_ok +response=$(curl -f -s -H "foo: bkthomps" -H "bar: banned" -H "source_cluster: proxy" -H "destination_cluster: override" http://envoy-proxy:8888/fourheader) +assert_ok +response=$(curl -f -s -H "foo: bkthomps" -H "bar: banned" -H "source_cluster: proxy" -H "destination_cluster: override" http://envoy-proxy:8888/fourheader) +assert_limited + +# +# Verify that replaces doesn't affect the original limit. +# +# descriptor: (foo: *), (bar: bkthomps) +# name: bkthomps +# quota: 1 / min +# +# descriptor: (source_cluster: proxy), (destination_cluster: bkthomps) +# replaces: bkthomps +# quota: 2 / min +# + +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: bkthomps" http://envoy-proxy:8888/fourheader) +assert_ok +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: bkthomps" http://envoy-proxy:8888/fourheader) +assert_ok +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: bkthomps" http://envoy-proxy:8888/fourheader) +assert_limited + +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: not_bkthomps" http://envoy-proxy:8888/fourheader) +assert_ok +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: not_bkthomps" http://envoy-proxy:8888/fourheader) +assert_limited + +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: bkthomps" http://envoy-proxy:8888/fourheader) +assert_limited + +# +# Verify that replaces can replace multiple descriptors. 
+# +# descriptor: (foo: *), (bar: bkthomps) +# name: bkthomps +# quota: 1 / min +# +# descriptor: (source_cluster: proxy), (destination_cluster: fake) +# name: fake_name +# quota: 2 / min +# +# descriptor: (category: account) +# replaces: bkthomps, fake_name +# quota: 4 / min +# + +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: fake" -H "category: account" http://envoy-proxy:8888/fiveheader) +assert_ok +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: fake" -H "category: account" http://envoy-proxy:8888/fiveheader) +assert_ok +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: fake" -H "category: account" http://envoy-proxy:8888/fiveheader) +assert_ok +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: fake" -H "category: account" http://envoy-proxy:8888/fiveheader) +assert_ok +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: fake" -H "category: account" http://envoy-proxy:8888/fiveheader) +assert_limited + +response=$(curl -f -s -H "foo: foo_2" -H "bar: bkthomps" http://envoy-proxy:8888/twoheader) +assert_ok +response=$(curl -f -s -H "foo: foo_2" -H "bar: bkthomps" http://envoy-proxy:8888/twoheader) +assert_limited + +response=$(curl -f -s -H "foo: foo_3" -H "bar: bar_3" -H "source_cluster: proxy" -H "destination_cluster: fake" http://envoy-proxy:8888/fourheader) +assert_ok +response=$(curl -f -s -H "foo: foo_3" -H "bar: bar_3" -H "source_cluster: proxy" -H "destination_cluster: fake" http://envoy-proxy:8888/fourheader) +assert_ok +response=$(curl -f -s -H "foo: foo_3" -H "bar: bar_3" -H "source_cluster: proxy" -H "destination_cluster: fake" http://envoy-proxy:8888/fourheader) +assert_limited + +response=$(curl -f -s -H "foo: my_foo" -H "bar: bkthomps" -H "source_cluster: proxy" -H "destination_cluster: fake" -H "category: account" http://envoy-proxy:8888/fiveheader) +assert_limited diff --git a/src/config/config.go b/src/config/config.go index d6f7e530..6aac1769 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -22,6 +22,8 @@ type RateLimit struct { Limit *pb.RateLimitResponse_RateLimit Unlimited bool ShadowMode bool + Name string + Replaces []string } // Interface for interacting with a loaded rate limit config. diff --git a/src/config/config_impl.go b/src/config/config_impl.go index ec3fa4a6..d1d22a4b 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -13,10 +13,16 @@ import ( "github.com/envoyproxy/ratelimit/src/stats" ) +type yamlReplaces struct { + Name string +} + type yamlRateLimit struct { RequestsPerUnit uint32 `yaml:"requests_per_unit"` Unit string Unlimited bool `yaml:"unlimited"` + Name string + Replaces []yamlReplaces } type yamlDescriptor struct { @@ -56,6 +62,8 @@ var validKeys = map[string]bool{ "requests_per_unit": true, "unlimited": true, "shadow_mode": true, + "name": true, + "replaces": true, } // Create a new rate limit config entry. @@ -64,10 +72,21 @@ var validKeys = map[string]bool{ // @param rlStats supplies the stats structure associated with the RateLimit // @param unlimited supplies whether the rate limit is unlimited // @return the new config entry. 
-func NewRateLimit( - requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, unlimited bool, shadowMode bool) *RateLimit { - - return &RateLimit{FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, Unlimited: unlimited, ShadowMode: shadowMode} +func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, + unlimited bool, shadowMode bool, name string, replaces []string) *RateLimit { + + return &RateLimit{ + FullKey: rlStats.GetKey(), + Stats: rlStats, + Limit: &pb.RateLimitResponse_RateLimit{ + RequestsPerUnit: requestsPerUnit, + Unit: unit, + }, + Unlimited: unlimited, + ShadowMode: shadowMode, + Name: name, + Replaces: replaces, + } } // Dump an individual descriptor for debugging purposes. @@ -135,11 +154,28 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p fmt.Sprintf("invalid rate limit unit '%s'", descriptorConfig.RateLimit.Unit))) } + replaces := make([]string, len(descriptorConfig.RateLimit.Replaces)) + for i, e := range descriptorConfig.RateLimit.Replaces { + replaces[i] = e.Name + } + rateLimit = NewRateLimit( - descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey), unlimited, descriptorConfig.ShadowMode) + descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), + statsManager.NewStats(newParentKey), unlimited, descriptorConfig.ShadowMode, + descriptorConfig.RateLimit.Name, replaces, + ) rateLimitDebugString = fmt.Sprintf( " ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t, shadow_mode=%t}", rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit.String(), rateLimit.Unlimited, rateLimit.ShadowMode) + + for _, replaces := range descriptorConfig.RateLimit.Replaces { + if replaces.Name == "" { + panic(newRateLimitConfigError(config, "should not have an empty replaces entry")) + } + if replaces.Name == descriptorConfig.RateLimit.Name { + panic(newRateLimitConfigError(config, "replaces should not contain name of same descriptor")) + } + } } logger.Debugf( @@ -260,7 +296,10 @@ func (this *rateLimitConfigImpl) GetLimit( rateLimitOverrideUnit, this.statsManager.NewStats(rateLimitKey), false, - false) + false, + "", + []string{}, + ) return rateLimit } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index bb74e2e0..0eb78552 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -114,6 +114,8 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co limitsToCheck := make([]*config.RateLimit, len(request.Descriptors)) isUnlimited := make([]bool, len(request.Descriptors)) + replacing := make(map[string]bool) + for i, descriptor := range request.Descriptors { if logger.IsLevelEnabled(logger.DebugLevel) { var descriptorEntryStrings []string @@ -143,9 +145,28 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co } } - if limitsToCheck[i] != nil && limitsToCheck[i].Unlimited { - isUnlimited[i] = true + if limitsToCheck[i] != nil { + for _, replace := range limitsToCheck[i].Replaces { + replacing[replace] = true + } + + if limitsToCheck[i].Unlimited { + isUnlimited[i] = true + limitsToCheck[i] = nil + } + } + } + + for i, limit := range limitsToCheck { + if limit == nil || limit.Name == "" { + continue + } + _, exists := replacing[limit.Name] + if exists { limitsToCheck[i] = nil + if 
logger.IsLevelEnabled(logger.DebugLevel) { + logger.Debugf("replacing %s", limit.Name) + } } } return limitsToCheck, isUnlimited diff --git a/test/config/config_test.go b/test/config/config_test.go index d22bdcaf..797c70e8 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -324,6 +324,28 @@ func TestBadLimitUnit(t *testing.T) { "bad_limit_unit.yaml: invalid rate limit unit 'foo'") } +func TestReplacesSelf(t *testing.T) { + expectConfigPanic( + t, + func() { + config.NewRateLimitConfigImpl( + loadFile("replaces_self.yaml"), + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + }, + "replaces_self.yaml: replaces should not contain name of same descriptor") +} + +func TestReplacesEmpty(t *testing.T) { + expectConfigPanic( + t, + func() { + config.NewRateLimitConfigImpl( + loadFile("replaces_empty.yaml"), + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + }, + "replaces_empty.yaml: should not have an empty replaces entry") +} + func TestBadYaml(t *testing.T) { expectConfigPanic( t, diff --git a/test/config/replaces_empty.yaml b/test/config/replaces_empty.yaml new file mode 100644 index 00000000..c4d7a109 --- /dev/null +++ b/test/config/replaces_empty.yaml @@ -0,0 +1,12 @@ +domain: test-domain +descriptors: + - key: key1 + value: value1 + descriptors: + - key: subkey1 + rate_limit: + name: self + replaces: + - name: + unit: second + requests_per_unit: 5 diff --git a/test/config/replaces_self.yaml b/test/config/replaces_self.yaml new file mode 100644 index 00000000..e49a47b2 --- /dev/null +++ b/test/config/replaces_self.yaml @@ -0,0 +1,12 @@ +domain: test-domain +descriptors: + - key: key1 + value: value1 + descriptors: + - key: subkey1 + rate_limit: + name: self + replaces: + - name: self + unit: second + requests_per_unit: 5 diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index 1a9a4d15..c37ad5e3 100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -29,7 +29,7 @@ func TestGenerateCacheKeys(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -48,7 +48,7 @@ func TestGenerateCacheKeysPrefix(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -102,7 +102,7 @@ func 
TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) @@ -125,7 +125,7 @@ func TestGetResponseStatusOverLimitWithLocalCacheShadowMode(t *testing.T) { sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) // This limit is in ShadowMode - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) @@ -149,7 +149,7 @@ func TestGetResponseStatusOverLimit(t *testing.T) { localCache := freecache.NewCache(100) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) @@ -175,7 +175,7 @@ func TestGetResponseStatusOverLimitShadowMode(t *testing.T) { sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) // Key is in shadow_mode: true - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) @@ -197,7 +197,7 @@ func TestGetResponseStatusBelowLimit(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, 
pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) @@ -218,7 +218,7 @@ func TestGetResponseStatusBelowLimitShadowMode(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index 14106934..d70346ce 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -53,7 +53,7 @@ func TestMemcached(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -77,7 +77,7 @@ func TestMemcached(t *testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, "", nil), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -111,8 +111,8 @@ func TestMemcached(t *testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -150,7 +150,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, 
sm.NewStats("key_value"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -168,7 +168,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value1_1234", uint64(1)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value1"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -241,7 +241,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil), } assert.Equal( @@ -342,7 +342,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil), } assert.Equal( @@ -399,7 +399,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key5_value5_1234", uint64(3)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -417,7 +417,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key6_value6_1234", uint64(2)).Return(uint64(7), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -435,7 +435,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key7_value7_1234", uint64(3)).Return(uint64(19), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = 
[]*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -453,7 +453,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key8_value8_1234", uint64(3)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -471,7 +471,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key9_value9_1234", uint64(7)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -489,7 +489,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key10_value10_1234", uint64(3)).Return(uint64(30), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -533,7 +533,7 @@ func TestMemcacheWithJitter(t *testing.T) { ).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -576,7 +576,7 @@ func TestMemcacheAdd(t *testing.T) { uint64(2), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := 
[]*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -600,7 +600,7 @@ func TestMemcacheAdd(t *testing.T) { ).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -673,7 +673,7 @@ func TestMemcachedTracer(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} cache.DoLimit(context.Background(), request, limits) diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 27eaf6d6..28a98719 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -49,7 +49,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} // wait for the pool to fill up for { diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 8933d200..a4900cba 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -69,7 +69,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -94,7 +94,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { }, 1) limits = []*config.RateLimit{ nil, - 
config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, "", nil), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -124,8 +124,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -204,7 +204,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil), } assert.Equal( @@ -304,7 +304,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil), } assert.Equal( @@ -360,7 +360,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -377,7 +377,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -394,7 +394,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, 
false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -411,7 +411,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -428,7 +428,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -445,7 +445,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -475,7 +475,7 @@ func TestRedisWithJitter(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -510,7 +510,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, 
sm.NewStats("key4_value4"), false, true, "", nil), } assert.Equal( @@ -615,7 +615,7 @@ func TestRedisTracer(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} cache.DoLimit(context.Background(), request, limits) spanStubs := testSpanExporter.GetSpans() diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 86fed5e9..76c0093f 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -145,7 +145,7 @@ func TestService(test *testing.T) { request = common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -181,7 +181,7 @@ func TestService(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -234,7 +234,7 @@ func TestServiceGlobalShadowMode(test *testing.T) { // Global Shadow mode limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -274,8 +274,8 @@ func TestRuleShadowMode(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil), } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -307,8 +307,8 @@ func TestMixedRuleShadowMode(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 
1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -366,7 +366,7 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -418,7 +418,7 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -478,7 +478,7 @@ func TestCacheError(test *testing.T) { service := t.setupBasicService() request := common.NewRateLimitRequest("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil)} t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(context.Background(), request, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { @@ -522,9 +522,9 @@ func TestUnlimited(test *testing.T) { request := common.NewRateLimitRequest( "some-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}, {{"baz", "qux"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false, false, "", nil), nil, - config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false), + config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false, "", nil), } t.config.EXPECT().GetLimit(context.Background(), "some-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "some-domain", request.Descriptors[1]).Return(limits[1]) From 
db150da796c677f61c0175997dd4e172bd756749 Mon Sep 17 00:00:00 2001 From: m-rcl <14212817+m-rcl@users.noreply.github.com> Date: Tue, 26 Jul 2022 21:42:17 +0200 Subject: [PATCH 041/181] Mask redis credentials in logs (#347) * use golang 1.18 in docker images Signed-off-by: Marcel Wiederer * mask redis credentials when logging the connection strings Signed-off-by: Marcel Wiederer Co-authored-by: Marcel Wiederer --- docker-compose.yml | 4 ++-- src/redis/driver_impl.go | 6 ++++-- src/utils/utilities.go | 20 ++++++++++++++++++ test/utils/utilities_test.go | 41 ++++++++++++++++++++++++++++++++++++ 4 files changed, 67 insertions(+), 4 deletions(-) create mode 100644 test/utils/utilities_test.go diff --git a/docker-compose.yml b/docker-compose.yml index 8271245e..ad92837d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,7 +20,7 @@ services: # minimal container that builds the ratelimit service binary and exits. ratelimit-build: - image: golang:1.14-alpine + image: golang:1.18-alpine working_dir: /go/src/github.com/envoyproxy/ratelimit command: go build -o /usr/local/bin/ratelimit ./src/service_cmd/main.go volumes: @@ -28,7 +28,7 @@ services: - binary:/usr/local/bin/ ratelimit-client-build: - image: golang:1.14-alpine + image: golang:1.18-alpine working_dir: /go/src/github.com/envoyproxy/ratelimit command: go build -o /usr/local/bin/ratelimit_client ./src/client_cmd/main.go volumes: diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 280a7cbb..f1516da2 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -12,6 +12,7 @@ import ( logger "github.com/sirupsen/logrus" "github.com/envoyproxy/ratelimit/src/server" + "github.com/envoyproxy/ratelimit/src/utils" ) type poolStats struct { @@ -65,7 +66,8 @@ func checkError(err error) { func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisType, url string, poolSize int, pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config, healthCheckActiveConnection bool, srv server.Server) Client { - logger.Warnf("connecting to redis on %s with pool size %d", url, poolSize) + maskedUrl := utils.MaskCredentialsInUrl(url) + logger.Warnf("connecting to redis on %s with pool size %d", maskedUrl, poolSize) df := func(network, addr string) (radix.Conn, error) { var dialOpts []radix.DialOpt @@ -75,7 +77,7 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisT } if auth != "" { - logger.Warnf("enabling authentication to redis on %s", url) + logger.Warnf("enabling authentication to redis on %s", maskedUrl) dialOpts = append(dialOpts, radix.DialAuthPass(auth)) } diff --git a/src/utils/utilities.go b/src/utils/utilities.go index c8001b03..f9ecf856 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -1,6 +1,8 @@ package utils import ( + "strings" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/golang/protobuf/ptypes/duration" ) @@ -41,3 +43,21 @@ func Max(a uint32, b uint32) uint32 { } return b } + +// Mask credentials from a redis connection string like +// foo,redis://user:pass@redisurl1,redis://user:pass@redisurl2 +// resulting in +// foo,redis://*****@redisurl1,redis://*****@redisurl2 +func MaskCredentialsInUrl(url string) string { + urls := strings.Split(url, ",") + + for i := 0; i < len(urls); i++ { + url := urls[i] + authUrlParts := strings.Split(url, "@") + if len(authUrlParts) > 1 && strings.HasPrefix(authUrlParts[0], "redis://") { + urls[i] = "redis://*****@" + authUrlParts[len(authUrlParts)-1] + } + } + 
+	return strings.Join(urls, ",")
+}
diff --git a/test/utils/utilities_test.go b/test/utils/utilities_test.go
new file mode 100644
index 00000000..aa3768d4
--- /dev/null
+++ b/test/utils/utilities_test.go
@@ -0,0 +1,41 @@
+package utils_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/envoyproxy/ratelimit/src/utils"
+)
+
+func TestMaskCredentialsInUrl(t *testing.T) {
+	url := "redis:6379"
+	assert.Equal(t, url, utils.MaskCredentialsInUrl(url))
+
+	url = "redis://foo:bar@redis:6379"
+	expected := "redis://*****@redis:6379"
+	assert.Equal(t, expected, utils.MaskCredentialsInUrl(url))
+}
+
+func TestMaskCredentialsInUrlCluster(t *testing.T) {
+	url := "redis1:6379,redis2:6379"
+	assert.Equal(t, url, utils.MaskCredentialsInUrl(url))
+
+	url = "redis://foo:bar@redis1:6379,redis://foo:bar@redis2:6379"
+	expected := "redis://*****@redis1:6379,redis://*****@redis2:6379"
+	assert.Equal(t, expected, utils.MaskCredentialsInUrl(url))
+
+	url = "redis://foo:b@r@redis1:6379,redis://foo:b@r@redis2:6379"
+	expected = "redis://*****@redis1:6379,redis://*****@redis2:6379"
+	assert.Equal(t, expected, utils.MaskCredentialsInUrl(url))
+}
+
+func TestMaskCredentialsInUrlSentinel(t *testing.T) {
+	url := "foobar,redis://foo:bar@redis1:6379,redis://foo:bar@redis2:6379"
+	expected := "foobar,redis://*****@redis1:6379,redis://*****@redis2:6379"
+	assert.Equal(t, expected, utils.MaskCredentialsInUrl(url))
+
+	url = "foob@r,redis://foo:b@r@redis1:6379,redis://foo:b@r@redis2:6379"
+	expected = "foob@r,redis://*****@redis1:6379,redis://*****@redis2:6379"
+	assert.Equal(t, expected, utils.MaskCredentialsInUrl(url))
+}

From 9d8d70a8bf93a493fb3b6bdc31fcce666076cae0 Mon Sep 17 00:00:00 2001
From: Renuka Piyumal Fernando
Date: Tue, 16 Aug 2022 20:32:28 +0530
Subject: [PATCH 042/181] Upgrade mediocregopher/radix/v3 (#352)

The ratelimit service currently does not support cluster mode with
Redis 7.x.x.

Fixes envoyproxy/ratelimit/issues#351

Signed-off-by: Renuka Fernando
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index 5ce950c3..f13b19cf 100644
--- a/go.mod
+++ b/go.mod
@@ -14,7 +14,7 @@ require (
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/lyft/goruntime v0.2.5
 	github.com/lyft/gostats v0.4.0
-	github.com/mediocregopher/radix/v3 v3.5.1
+	github.com/mediocregopher/radix/v3 v3.8.1
 	github.com/sirupsen/logrus v1.6.0
 	github.com/stretchr/testify v1.7.1
 	golang.org/x/net v0.0.0-20220325170049-de3da57026de
diff --git a/go.sum b/go.sum
index 70f0c04c..3fcb608b 100644
--- a/go.sum
+++ b/go.sum
@@ -176,8 +176,8 @@ github.com/lyft/goruntime v0.2.5 h1:yRmwOXl3Zns3+Z03fDMWt5+p609rfhIErh7HYCayODg=
 github.com/lyft/goruntime v0.2.5/go.mod h1:8rUh5gwIPQtyIkIXHbLN1j45HOb8cMgDhrw5GA7DF4g=
 github.com/lyft/gostats v0.4.0 h1:PbRWmwidTPk6Y80S6itBWDa+XVt1hGvqFM88TBJYdOo=
 github.com/lyft/gostats v0.4.0/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8=
-github.com/mediocregopher/radix/v3 v3.5.1 h1:IOYgQUMA380N4khaL5eNT4v/P2LnHa8b0wnVdwZMFsY=
-github.com/mediocregopher/radix/v3 v3.5.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
+github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M=
+github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
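For reference, the `MaskCredentialsInUrl` helper added in PATCH 041 above is a pure string transformation and can be exercised directly. The following is a minimal, hypothetical sketch (a standalone `main` package, not part of any patch in this series); the expected outputs mirror the cases covered by `test/utils/utilities_test.go`:

```go
package main

import (
	"fmt"

	"github.com/envoyproxy/ratelimit/src/utils"
)

func main() {
	// A single URL with credentials: everything before the last '@' is masked.
	fmt.Println(utils.MaskCredentialsInUrl("redis://foo:bar@redis:6379"))
	// -> redis://*****@redis:6379

	// A sentinel-style list: each element with a "redis://" prefix is masked
	// individually; the leading master name ("foobar") is left untouched.
	fmt.Println(utils.MaskCredentialsInUrl("foobar,redis://foo:b@r@redis1:6379"))
	// -> redis://*****@redis1:6379 for the second element
}
```

Splitting on the last `@` of each element is what keeps passwords that themselves contain `@` (such as `b@r`) fully masked.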
From 7b60ac7991660a55d1bfb667465a4586a54dae2e Mon Sep 17 00:00:00 2001
From: Marcel Wiederer <14212817+m-rcl@users.noreply.github.com>
Date: Thu, 1 Sep 2022 16:41:43 +0200
Subject: [PATCH 043/181] Allow merge of configs referencing the same domain (#357)

Add a MERGE_DOMAIN_CONFIG variable, defaulting to false, which maps to
the former behavior. Setting it to true allows multiple files that
reference the same domain to be merged, provided they contain no
duplicate keys.

Signed-off-by: Marcel Wiederer
---
 README.md                          |  5 +-
 src/config/config.go               |  3 +-
 src/config/config_impl.go          | 24 ++++++----
 src/config_check_cmd/main.go       |  8 ++--
 src/service/ratelimit.go           |  4 +-
 src/settings/settings.go           |  3 ++
 test/config/config_test.go         | 74 +++++++++++++++++++++++-------
 test/config/merge_domain_key1.yaml |  9 ++++
 test/config/merge_domain_key2.yaml |  9 ++++
 test/mocks/config/config.go        |  8 ++--
 test/service/ratelimit_test.go     | 28 +++++------
 11 files changed, 126 insertions(+), 49 deletions(-)
 create mode 100644 test/config/merge_domain_key1.yaml
 create mode 100644 test/config/merge_domain_key2.yaml

diff --git a/README.md b/README.md
index 5abd0fa1..a5d3040f 100644
--- a/README.md
+++ b/README.md
@@ -515,6 +515,9 @@ The former is the default behavior. To use the latter method, set the `RUNTIME_W
 
 For more information on how runtime works you can read its [README](https://github.com/lyft/goruntime).
 
+By default it is not possible to define multiple configuration files within `RUNTIME_SUBDIRECTORY` referencing the same domain.
+To enable this behavior, set `MERGE_DOMAIN_CONFIG` to `true`.
+
 ## Log Format
 
 A centralized log collection system works better with logs in json format. JSON format avoids the need for custom parsing rules.
@@ -586,7 +589,7 @@ There is a global shadow-mode which can make it easier to introduce rate limitin
 
 The global shadow mode is configured with an environment variable
 
-Setting environment variable`SHADOW_MODE` to `true` will enable the feature.
+Setting environment variable `SHADOW_MODE` to `true` will enable the feature.
 
 ## Statistics
diff --git a/src/config/config.go b/src/config/config.go
index 6aac1769..7a22734d 100644
--- a/src/config/config.go
+++ b/src/config/config.go
@@ -50,7 +50,8 @@ type RateLimitConfigLoader interface {
 	// Load a new configuration from a list of YAML files.
 	// @param configs supplies a list of full YAML files in string form.
 	// @param statsManager supplies the statsManager to initialize stats during runtime.
+	// @param mergeDomainConfigs defines whether multiple configurations referencing the same domain will be merged or rejected throwing an error.
 	// @return a new configuration.
 	// @throws RateLimitConfigError if the configuration could not be created.
- Load(configs []RateLimitConfigToLoad, statsManager stats.Manager) RateLimitConfig + Load(configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig } diff --git a/src/config/config_impl.go b/src/config/config_impl.go index d1d22a4b..aa440400 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -48,8 +48,9 @@ type rateLimitDomain struct { } type rateLimitConfigImpl struct { - domains map[string]*rateLimitDomain - statsManager stats.Manager + domains map[string]*rateLimitDomain + statsManager stats.Manager + mergeDomainConfigs bool } var validKeys = map[string]bool{ @@ -257,8 +258,14 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) { } if _, present := this.domains[root.Domain]; present { - panic(newRateLimitConfigError( - config, fmt.Sprintf("duplicate domain '%s' in config file", root.Domain))) + if !this.mergeDomainConfigs { + panic(newRateLimitConfigError( + config, fmt.Sprintf("duplicate domain '%s' in config file", root.Domain))) + } + + logger.Debugf("patching domain: %s", root.Domain) + this.domains[root.Domain].loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsManager) + return } logger.Debugf("loading domain: %s", root.Domain) @@ -353,11 +360,12 @@ func descriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) str // Create rate limit config from a list of input YAML files. // @param configs specifies a list of YAML files to load. // @param stats supplies the stats scope to use for limit stats during runtime. +// @param mergeDomainConfigs defines whether multiple configurations referencing the same domain will be merged or rejected throwing an error. // @return a new config. func NewRateLimitConfigImpl( - configs []RateLimitConfigToLoad, statsManager stats.Manager) RateLimitConfig { + configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig { - ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsManager} + ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsManager, mergeDomainConfigs} for _, config := range configs { ret.loadConfig(config) } @@ -368,9 +376,9 @@ func NewRateLimitConfigImpl( type rateLimitConfigLoaderImpl struct{} func (this *rateLimitConfigLoaderImpl) Load( - configs []RateLimitConfigToLoad, statsManager stats.Manager) RateLimitConfig { + configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig { - return NewRateLimitConfigImpl(configs, statsManager) + return NewRateLimitConfigImpl(configs, statsManager, mergeDomainConfigs) } // @return a new default config loader implementation. 
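The `mergeDomainConfigs` flag introduced above threads from the `RateLimitConfigLoader` interface into `loadConfig`, which patches an already-loaded domain instead of panicking on the duplicate. As a rough sketch of the call site (modeled on `TestDomainMerge` and `config_check_cmd` in this patch; the inline YAML strings are hypothetical stand-ins for real config files):

```go
package main

import (
	gostats "github.com/lyft/gostats"

	"github.com/envoyproxy/ratelimit/src/config"
	"github.com/envoyproxy/ratelimit/src/settings"
	"github.com/envoyproxy/ratelimit/src/stats"
)

func main() {
	// Two config files referencing the same domain with distinct descriptor keys.
	files := []config.RateLimitConfigToLoad{
		{Name: "file1.yaml", FileBytes: "domain: test-domain\ndescriptors:\n  - key: key1\n    rate_limit:\n      unit: minute\n      requests_per_unit: 10\n"},
		{Name: "file2.yaml", FileBytes: "domain: test-domain\ndescriptors:\n  - key: key2\n    rate_limit:\n      unit: minute\n      requests_per_unit: 20\n"},
	}
	statsManager := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), settings.NewSettings())

	// With mergeDomainConfigs=false this would panic with
	// "duplicate domain 'test-domain' in config file"; with true the
	// descriptors from both files are merged under "test-domain".
	rlConfig := config.NewRateLimitConfigImpl(files, statsManager, true)
	rlConfig.Dump()
}
```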
diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index 0c7480e2..750af791 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -15,7 +15,7 @@ import ( "github.com/envoyproxy/ratelimit/src/config" ) -func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { +func loadConfigs(allConfigs []config.RateLimitConfigToLoad, mergeDomainConfigs bool) { defer func() { err := recover() if err != nil { @@ -24,12 +24,14 @@ func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { } }() statsManager := stats.NewStatManager(gostats.NewStore(gostats.NewNullSink(), false), settings.NewSettings()) - config.NewRateLimitConfigImpl(allConfigs, statsManager) + config.NewRateLimitConfigImpl(allConfigs, statsManager, mergeDomainConfigs) } func main() { configDirectory := flag.String( "config_dir", "", "path to directory containing rate limit configs") + mergeDomainConfigs := flag.Bool( + "merge_domain_configs", false, "whether to merge configurations, referencing the same domain") flag.Parse() fmt.Printf("checking rate limit configs...\n") fmt.Printf("loading config directory: %s\n", *configDirectory) @@ -52,6 +54,6 @@ func main() { allConfigs = append(allConfigs, config.RateLimitConfigToLoad{finalPath, string(bytes)}) } - loadConfigs(allConfigs) + loadConfigs(allConfigs, *mergeDomainConfigs) fmt.Printf("all rate limit configs ok\n") } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 0eb78552..621a83df 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -75,12 +75,12 @@ func (this *service) reloadConfig(statsManager stats.Manager) { files = append(files, config.RateLimitConfigToLoad{key, snapshot.Get(key)}) } - newConfig := this.configLoader.Load(files, statsManager) + rlSettings := settings.NewSettings() + newConfig := this.configLoader.Load(files, statsManager, rlSettings.MergeDomainConfigurations) this.stats.ConfigLoadSuccess.Inc() this.configLock.Lock() this.config = newConfig - rlSettings := settings.NewSettings() this.globalShadowMode = rlSettings.GlobalShadowMode if rlSettings.RateLimitResponseHeadersEnabled { diff --git a/src/settings/settings.go b/src/settings/settings.go index c88c186e..0f8f501e 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -124,6 +124,9 @@ type Settings struct { // Should the ratelimiting be running in Global shadow-mode, ie. 
never report a ratelimit status, unless a rate was provided from envoy as an override GlobalShadowMode bool `envconfig:"SHADOW_MODE" default:"false"` + // Allow merging of multiple yaml files referencing the same domain + MergeDomainConfigurations bool `envconfig:"MERGE_DOMAIN_CONFIG" default:"false"` + // OTLP trace settings TracingEnabled bool `envconfig:"TRACING_ENABLED" default:"false"` TracingServiceName string `envconfig:"TRACING_SERVICE_NAME" default:"RateLimit"` diff --git a/test/config/config_test.go b/test/config/config_test.go index 797c70e8..2b4cb0a7 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -27,7 +27,7 @@ func loadFile(path string) []config.RateLimitConfigToLoad { func TestBasicConfig(t *testing.T) { assert := assert.New(t) stats := stats.NewStore(stats.NewNullSink(), false) - rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats)) + rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats), false) rlConfig.Dump() assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{})) assert.Nil(rlConfig.GetLimit(nil, "test-domain", &pb_struct.RateLimitDescriptor{})) @@ -179,10 +179,39 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key6.within_limit").Value()) } +func TestDomainMerge(t *testing.T) { + assert := assert.New(t) + stats := stats.NewStore(stats.NewNullSink(), false) + + files := loadFile("merge_domain_key1.yaml") + files = append(files, loadFile("merge_domain_key2.yaml")...) + + rlConfig := config.NewRateLimitConfigImpl(files, mockstats.NewMockStatManager(stats), true) + rlConfig.Dump() + assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{})) + assert.Nil(rlConfig.GetLimit(nil, "test-domain", &pb_struct.RateLimitDescriptor{})) + + rl := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}}, + }) + assert.NotNil(rl) + assert.EqualValues(10, rl.Limit.RequestsPerUnit) + + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "value2"}}, + }) + assert.NotNil(rl) + assert.EqualValues(20, rl.Limit.RequestsPerUnit) +} + func TestConfigLimitOverride(t *testing.T) { assert := assert.New(t) stats := stats.NewStore(stats.NewNullSink(), false) - rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats)) + rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats), false) rlConfig.Dump() // No matching domain assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{ @@ -275,7 +304,7 @@ func TestEmptyDomain(t *testing.T) { t, func() { config.NewRateLimitConfigImpl( - loadFile("empty_domain.yaml"), mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + loadFile("empty_domain.yaml"), mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "empty_domain.yaml: config file cannot have empty domain") } @@ -286,7 +315,7 @@ func TestDuplicateDomain(t *testing.T) { func() { files := loadFile("basic_config.yaml") files = append(files, loadFile("duplicate_domain.yaml")...) 
- config.NewRateLimitConfigImpl(files, mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + config.NewRateLimitConfigImpl(files, mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "duplicate_domain.yaml: duplicate domain 'test-domain' in config file") } @@ -297,7 +326,7 @@ func TestEmptyKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("empty_key.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "empty_key.yaml: descriptor has empty key") } @@ -308,18 +337,31 @@ func TestDuplicateKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("duplicate_key.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "duplicate_key.yaml: duplicate descriptor composite key 'test-domain.key1_value1'") } +func TestDuplicateKeyDomainMerge(t *testing.T) { + expectConfigPanic( + t, + func() { + files := loadFile("merge_domain_key1.yaml") + files = append(files, loadFile("merge_domain_key1.yaml")...) + config.NewRateLimitConfigImpl( + files, + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), true) + }, + "merge_domain_key1.yaml: duplicate descriptor composite key 'test-domain.key1_value1'") +} + func TestBadLimitUnit(t *testing.T) { expectConfigPanic( t, func() { config.NewRateLimitConfigImpl( loadFile("bad_limit_unit.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "bad_limit_unit.yaml: invalid rate limit unit 'foo'") } @@ -330,7 +372,7 @@ func TestReplacesSelf(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("replaces_self.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "replaces_self.yaml: replaces should not contain name of same descriptor") } @@ -341,7 +383,7 @@ func TestReplacesEmpty(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("replaces_empty.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "replaces_empty.yaml: should not have an empty replaces entry") } @@ -352,7 +394,7 @@ func TestBadYaml(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("bad_yaml.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "bad_yaml.yaml: error loading config file: yaml: line 2: found unexpected end of stream") } @@ -363,7 +405,7 @@ func TestMisspelledKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("misspelled_key.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "misspelled_key.yaml: config error, unknown key 'ratelimit'") @@ -372,7 +414,7 @@ func TestMisspelledKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("misspelled_key2.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, 
"misspelled_key2.yaml: config error, unknown key 'requestsperunit'") } @@ -383,7 +425,7 @@ func TestNonStringKey(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("non_string_key.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "non_string_key.yaml: config error, key is not of type string: 0.25") } @@ -394,7 +436,7 @@ func TestNonMapList(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("non_map_list.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "non_map_list.yaml: config error, yaml file contains list of type other than map: a") } @@ -405,7 +447,7 @@ func TestUnlimitedWithRateLimitUnit(t *testing.T) { func() { config.NewRateLimitConfigImpl( loadFile("unlimited_with_unit.yaml"), - mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false))) + mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)), false) }, "unlimited_with_unit.yaml: should not specify rate limit unit when unlimited") } @@ -414,7 +456,7 @@ func TestShadowModeConfig(t *testing.T) { assert := assert.New(t) stats := stats.NewStore(stats.NewNullSink(), false) - rlConfig := config.NewRateLimitConfigImpl(loadFile("shadowmode_config.yaml"), mockstats.NewMockStatManager(stats)) + rlConfig := config.NewRateLimitConfigImpl(loadFile("shadowmode_config.yaml"), mockstats.NewMockStatManager(stats), false) rlConfig.Dump() rl := rlConfig.GetLimit( diff --git a/test/config/merge_domain_key1.yaml b/test/config/merge_domain_key1.yaml new file mode 100644 index 00000000..a955540d --- /dev/null +++ b/test/config/merge_domain_key1.yaml @@ -0,0 +1,9 @@ +# Basic configuration which will be merged with `merge_domain2.yaml` +domain: test-domain +descriptors: + # Top level key/value which is not specified in `merge_domain2.yaml` + - key: key1 + value: value1 + rate_limit: + unit: minute + requests_per_unit: 10 diff --git a/test/config/merge_domain_key2.yaml b/test/config/merge_domain_key2.yaml new file mode 100644 index 00000000..2cc0f849 --- /dev/null +++ b/test/config/merge_domain_key2.yaml @@ -0,0 +1,9 @@ +# Basic configuration which will be merged with `merge_domain.yaml` +domain: test-domain +descriptors: + # Top level key/value which is not specified in `merge_domain.yaml` + - key: key2 + value: value2 + rate_limit: + unit: minute + requests_per_unit: 20 diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go index 7ac5220c..f5989eb7 100644 --- a/test/mocks/config/config.go +++ b/test/mocks/config/config.go @@ -90,15 +90,15 @@ func (m *MockRateLimitConfigLoader) EXPECT() *MockRateLimitConfigLoaderMockRecor } // Load mocks base method -func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats.Manager) config.RateLimitConfig { +func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats.Manager, arg2 bool) config.RateLimitConfig { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Load", arg0, arg1) + ret := m.ctrl.Call(m, "Load", arg0, arg1, arg2) ret0, _ := ret[0].(config.RateLimitConfig) return ret0 } // Load indicates an expected call of Load -func (mr *MockRateLimitConfigLoaderMockRecorder) Load(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRateLimitConfigLoaderMockRecorder) Load(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockRateLimitConfigLoader)(nil).Load), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockRateLimitConfigLoader)(nil).Load), arg0, arg1, arg2) } diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 76c0093f..faddbbdb 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -100,8 +100,8 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe this.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1) this.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) this.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, - gomock.Any()).Return(this.config) + []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, + gomock.Any(), gomock.Any()).Return(this.config) // reset exporter before using testSpanExporter.Reset() @@ -136,8 +136,8 @@ func TestService(test *testing.T) { // Force a config reload. barrier := newBarrier() t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) + []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) t.runtimeUpdateCallback <- 1 barrier.wait() @@ -170,8 +170,8 @@ func TestService(test *testing.T) { // Config load failure. t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager) { + []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager, bool) { defer barrier.signal() panic(config.RateLimitConfigError("load error")) }) @@ -223,8 +223,8 @@ func TestServiceGlobalShadowMode(test *testing.T) { // Force a config reload. barrier := newBarrier() t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) + []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) t.runtimeUpdateCallback <- 1 barrier.wait() @@ -357,8 +357,8 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) { // Config reload. barrier := newBarrier() t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) + []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) t.runtimeUpdateCallback <- 1 barrier.wait() @@ -409,8 +409,8 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { // Config reload. 
barrier := newBarrier() t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager) { barrier.signal() }).Return(t.config) + []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) t.runtimeUpdateCallback <- 1 barrier.wait() @@ -501,8 +501,8 @@ func TestInitialLoadError(test *testing.T) { t.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1) t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager) { + []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( + func([]config.RateLimitConfigToLoad, stats.Manager, bool) { panic(config.RateLimitConfigError("load error")) }) service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true, t.mockClock, false) From ea444b15aa463a979a6be3921ff2a4423480a566 Mon Sep 17 00:00:00 2001 From: Marcel Wiederer <14212817+m-rcl@users.noreply.github.com> Date: Mon, 12 Sep 2022 17:05:38 +0200 Subject: [PATCH 044/181] Reload configuration when deleting config files (#361) Signed-off-by: Marcel Wiederer --- Makefile | 2 +- README.md | 7 ++++ go.mod | 6 ++-- go.sum | 17 ++++------ src/server/server_impl.go | 16 ++++++--- test/integration/integration_test.go | 49 +++++++++++++++++----------- 6 files changed, 60 insertions(+), 37 deletions(-) diff --git a/Makefile b/Makefile index 857788cd..2f98413d 100644 --- a/Makefile +++ b/Makefile @@ -142,7 +142,7 @@ docker_multiarch_push: docker_multiarch_image .PHONY: integration_tests integration_tests: - docker-compose --project-dir $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester + docker-compose --project-directory $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester .PHONY: precommit_install precommit_install: diff --git a/README.md b/README.md index a5d3040f..7be258c7 100644 --- a/README.md +++ b/README.md @@ -513,6 +513,13 @@ There are two methods for triggering a configuration reload: The former is the default behavior. To use the latter method, set the `RUNTIME_WATCH_ROOT` environment variable to `false`. +The following filesystem operations on configuration files inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/` will force a reload of all config files: + +- Write +- Create +- Chmod +- Remove + For more information on how runtime works you can read its [README](https://github.com/lyft/goruntime). By default it is not possible to define multiple configuration files within `RUNTIME_SUBDIRECTORY` referencing the same domain. 
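For intuition about why `Remove` had to be added explicitly: goruntime's `DirectoryRefresher` is built on `fsnotify` (note the `fsnotify` bump in the `go.mod` diff below), and a reload fires only for the filesystem operations the refresher is told to watch. The following minimal Go sketch illustrates the mechanism only — it is not the goruntime implementation, and the function and callback names are invented for the example:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// watchConfigDir is an illustrative stand-in for a directory refresher:
// it treats Remove the same as Write/Create/Chmod, so deleting a config
// file also triggers a reload. Error-channel handling is omitted for brevity.
func watchConfigDir(dir string, reload func()) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	go func() {
		defer watcher.Close()
		for event := range watcher.Events {
			// Any of the four operations listed above forces a reload.
			if event.Op&(fsnotify.Write|fsnotify.Create|fsnotify.Chmod|fsnotify.Remove) != 0 {
				log.Printf("config change detected (%v), reloading", event)
				reload()
			}
		}
	}()
	return watcher.Add(dir)
}
```

With `Remove` excluded — the previous behavior — deleting a config file would leave its stale descriptors loaded until some other write touched the directory, which is exactly what this patch fixes.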
diff --git a/go.mod b/go.mod index f13b19cf..e3c741b6 100644 --- a/go.mod +++ b/go.mod @@ -12,8 +12,8 @@ require ( github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 github.com/kavu/go_reuseport v1.2.0 github.com/kelseyhightower/envconfig v1.4.0 - github.com/lyft/goruntime v0.2.5 - github.com/lyft/gostats v0.4.0 + github.com/lyft/goruntime v0.3.0 + github.com/lyft/gostats v0.4.1 github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.6.0 github.com/stretchr/testify v1.7.1 @@ -40,7 +40,7 @@ require ( github.com/cespare/xxhash v1.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect - github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/google/uuid v1.3.0 github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/go.sum b/go.sum index 3fcb608b..bb29cc18 100644 --- a/go.sum +++ b/go.sum @@ -77,8 +77,8 @@ github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYA github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -151,7 +151,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= @@ -172,10 +171,10 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lyft/goruntime v0.2.5 h1:yRmwOXl3Zns3+Z03fDMWt5+p609rfhIErh7HYCayODg= -github.com/lyft/goruntime v0.2.5/go.mod h1:8rUh5gwIPQtyIkIXHbLN1j45HOb8cMgDhrw5GA7DF4g= -github.com/lyft/gostats v0.4.0 h1:PbRWmwidTPk6Y80S6itBWDa+XVt1hGvqFM88TBJYdOo= -github.com/lyft/gostats v0.4.0/go.mod 
h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= +github.com/lyft/goruntime v0.3.0 h1:VLBYR4s3XazkUT8lLtq9CJrt58YmLQQumrK3ktenEkI= +github.com/lyft/goruntime v0.3.0/go.mod h1:BW1gngSpMJR9P9w23BPUPdhdbUWhpirl98TQhOWWMF4= +github.com/lyft/gostats v0.4.1 h1:oR6p4HRCGxt0nUntmZIWmYMgyothBi3eZH2A71vRjsc= +github.com/lyft/gostats v0.4.1/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M= github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -329,6 +328,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -350,12 +350,9 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 8fa31d2d..d98fc5ad 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -241,23 +241,31 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc } else { loaderOpts = append(loaderOpts, loader.AllowDotFiles) } - + var err error if s.RuntimeWatchRoot { - ret.runtime = loader.New( + ret.runtime, err = loader.New2( s.RuntimePath, s.RuntimeSubdirectory, ret.store.ScopeWithTags("runtime", s.ExtraTags), &loader.SymlinkRefresher{RuntimePath: s.RuntimePath}, loaderOpts...) 
} else { - ret.runtime = loader.New( + directoryRefresher := &loader.DirectoryRefresher{} + // Adding loader.Remove to the default set of goruntime's FileSystemOps. + directoryRefresher.WatchFileSystemOps(loader.Remove, loader.Write, loader.Create, loader.Chmod) + + ret.runtime, err = loader.New2( filepath.Join(s.RuntimePath, s.RuntimeSubdirectory), "config", ret.store.ScopeWithTags("runtime", s.ExtraTags), - &loader.DirectoryRefresher{}, + directoryRefresher, loaderOpts...) } + if err != nil { + panic(err) + } + // setup http router ret.router = mux.NewRouter() diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 597ea1f3..a835c508 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -696,7 +696,7 @@ func testConfigReload(s settings.Settings) func(*testing.T) { assert.NoError(err) runner.GetStatsStore().Flush() - loadCount1 := runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() + loadCountBefore := runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() // Copy a new file to config folder to test config reload functionality in, err := os.Open("runtime/current/ratelimit/reload.yaml") @@ -718,26 +718,10 @@ func testConfigReload(s settings.Settings) func(*testing.T) { panic(err) } - // Need to wait for config reload to take place and new descriptors to be loaded. - // Shouldn't take more than 5 seconds but wait 120 at most just to be safe. - wait := 120 - reloaded := false - loadCount2 := uint64(0) - - for i := 0; i < wait; i++ { - time.Sleep(1 * time.Second) - runner.GetStatsStore().Flush() - loadCount2 = runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() - - // Check that successful loads count has increased before continuing. - if loadCount2 > loadCount1 { - reloaded = true - break - } - } + loadCountAfter, reloaded := waitForConfigReload(runner, loadCountBefore) assert.True(reloaded) - assert.Greater(loadCount2, loadCount1) + assert.Greater(loadCountAfter, loadCountBefore) response, err = c.ShouldRateLimit( context.Background(), @@ -759,5 +743,32 @@ func testConfigReload(s settings.Settings) func(*testing.T) { if err != nil { panic(err) } + + // Removal of config files must trigger a reload + loadCountBefore = loadCountAfter + loadCountAfter, reloaded = waitForConfigReload(runner, loadCountBefore) + assert.True(reloaded) + assert.Greater(loadCountAfter, loadCountBefore) + } +} + +func waitForConfigReload(runner *runner.Runner, loadCountBefore uint64) (uint64, bool) { + // Need to wait for config reload to take place and new descriptors to be loaded. + // Shouldn't take more than 5 seconds but wait 120 at most just to be safe. + wait := 120 + reloaded := false + loadCountAfter := uint64(0) + + for i := 0; i < wait; i++ { + time.Sleep(1 * time.Second) + runner.GetStatsStore().Flush() + loadCountAfter = runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() + + // Check that successful loads count has increased before continuing. + if loadCountAfter > loadCountBefore { + reloaded = true + break + } } + return loadCountAfter, reloaded } From 73d7295bab0056336d9ab7904f297d7a937057be Mon Sep 17 00:00:00 2001 From: Dhi Aurrahman Date: Mon, 19 Sep 2022 23:49:45 +0700 Subject: [PATCH 045/181] Upgrade golang.org/x/net (#364) To fix CVE-2022-27664. 
Signed-off-by: Dhi Aurrahman --- Dockerfile | 2 +- go.mod | 4 ++-- go.sum | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 36564b89..685af6d1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.15 AS final +FROM alpine:3.16 AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit diff --git a/go.mod b/go.mod index e3c741b6..b6a6df9e 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.6.0 github.com/stretchr/testify v1.7.1 - golang.org/x/net v0.0.0-20220325170049-de3da57026de + golang.org/x/net v0.0.0-20220909164309-bea034e7d591 google.golang.org/grpc v1.45.0 gopkg.in/yaml.v2 v2.3.0 ) @@ -54,6 +54,6 @@ require ( go.opentelemetry.io/otel/sdk v1.7.0 go.opentelemetry.io/otel/trace v1.7.0 go.opentelemetry.io/proto/otlp v0.16.0 // indirect - golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect + golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect golang.org/x/text v0.3.7 // indirect ) diff --git a/go.sum b/go.sum index bb29cc18..c3150920 100644 --- a/go.sum +++ b/go.sum @@ -298,8 +298,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -350,8 +350,8 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 3af40e61fcceb60551e3d0a650e8707b4f78a181 Mon Sep 17 00:00:00 2001 From: "Ira W. Snyder" Date: Thu, 22 Sep 2022 00:53:14 -0500 Subject: [PATCH 046/181] Add support for Redis AUTH with username and password (#363) * Add support for Redis AUTH with username and password The current code only supports Redis AUTH with the username `default`. Add support for Redis AUTH with username and password. Based on this PR for the upstream library: https://github.com/mediocregopher/radix/pull/195/files Signed-off-by: Ira W. Snyder * Update miniredis dependency The updated version of the miniredis dependency adds the `RequireUserAuth()` method, which configures support for username-password authentication. Signed-off-by: Ira W. Snyder * Add tests Add two tests: * check that username-password auth works successfully * check that username-password auth fails with invalid password Signed-off-by: Ira W. Snyder * Update README Signed-off-by: Ira W. Snyder Signed-off-by: Ira W. Snyder --- README.md | 3 ++- go.mod | 6 +++--- go.sum | 6 ++++++ src/redis/driver_impl.go | 11 ++++++++--- test/redis/driver_impl_test.go | 27 +++++++++++++++++++++++++++ 5 files changed, 46 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 7be258c7..7ec4569f 100644 --- a/README.md +++ b/README.md @@ -727,7 +727,8 @@ As well Ratelimit supports TLS connections and authentication. These can be conf 1. `REDIS_TLS` & `REDIS_PERSECOND_TLS`: set to `"true"` to enable a TLS connection for the specific connection type. 1. `REDIS_TLS_CLIENT_CERT`, `REDIS_TLS_CLIENT_KEY`, and `REDIS_TLS_CACERT` to provide files to specify a TLS connection configuration to a Redis server that requires client certificate verification. (This is effective when `REDIS_TLS` or `REDIS_PERSECOND_TLS` is set to `"true"`). -1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable authentication to the redis host. +1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable password-only authentication to the redis host. +1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"username:password"` to enable username-password authentication to the redis host. 1.
`CACHE_KEY_PREFIX`: a string to prepend to all cache keys ## Redis type diff --git a/go.mod b/go.mod index b6a6df9e..6036fa96 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/envoyproxy/ratelimit go 1.18 require ( - github.com/alicebob/miniredis/v2 v2.11.4 + github.com/alicebob/miniredis/v2 v2.23.0 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/coocood/freecache v1.1.0 github.com/envoyproxy/go-control-plane v0.10.1 @@ -36,7 +36,7 @@ require ( ) require ( - github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect + github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect @@ -45,7 +45,7 @@ require ( github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.2.0 // indirect - github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect + github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 go.opentelemetry.io/otel v1.7.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3 diff --git a/go.sum b/go.sum index c3150920..ce127e13 100644 --- a/go.sum +++ b/go.sum @@ -38,8 +38,12 @@ github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.11.4 h1:GsuyeunTx7EllZBU3/6Ji3dhMQZDpC9rLf1luJ+6M5M= github.com/alicebob/miniredis/v2 v2.11.4/go.mod h1:VL3UDEfAH59bSa7MuHMuFToxkqyHh69s/WUbYlOAuyg= +github.com/alicebob/miniredis/v2 v2.23.0 h1:+lwAJYjvvdIVg6doFHuotFjueJ/7KY10xo/vm3X3Scw= +github.com/alicebob/miniredis/v2 v2.23.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0= github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= @@ -202,6 +206,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0= github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= +github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index f1516da2..9fac55b5 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -77,9 +77,14 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisT } if auth != "" { - logger.Warnf("enabling authentication to redis on %s", maskedUrl) - - dialOpts = append(dialOpts, radix.DialAuthPass(auth)) + user, pass, found := strings.Cut(auth, ":") + if found { + logger.Warnf("enabling authentication to redis on %s with user %s", maskedUrl, user) + dialOpts = append(dialOpts, radix.DialAuthUser(user, pass)) + } else { + logger.Warnf("enabling authentication to redis on %s without user", maskedUrl) + dialOpts = append(dialOpts, radix.DialAuthPass(auth)) + } } return radix.Dial(network, addr, dialOpts...) diff --git a/test/redis/driver_impl_test.go b/test/redis/driver_impl_test.go index 3f924e4a..cbb5a956 100644 --- a/test/redis/driver_impl_test.go +++ b/test/redis/driver_impl_test.go @@ -1,6 +1,7 @@ package redis_test import ( + "fmt" "testing" "time" @@ -80,6 +81,32 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit }) }) + t.Run("auth user pass", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + user, pass := "test-user", "test-pass" + redisSrv.RequireUserAuth(user, pass) + + redisAuth := fmt.Sprintf("%s:%s", user, pass) + assert.NotPanics(t, func() { + mkRedisClient(redisAuth, redisSrv.Addr()) + }) + }) + + t.Run("auth user pass fail", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + user, pass := "test-user", "test-pass" + redisSrv.RequireUserAuth(user, pass) + + redisAuth := fmt.Sprintf("%s:invalid-password", user) + assert.PanicsWithError(t, "WRONGPASS invalid username-password pair", func() { + mkRedisClient(redisAuth, redisSrv.Addr()) + }) + }) + t.Run("ImplicitPipeliningEnabled() return expected value", func(t *testing.T) { redisSrv := mustNewRedisServer() defer redisSrv.Close() From 0b2f4d5fb04bf55e1873e2c5e2bb28da67c0643f Mon Sep 17 00:00:00 2001 From: Tim Bart Date: Wed, 28 Sep 2022 08:01:43 -0700 Subject: [PATCH 047/181] tracing: make sampling rate configurable (#359) The `AlwaysSample()` sampler is not recommended when the volume of traces collected can be large. This commit adds the ability to configure the sampling rate while respecting parent sampling decisions Details at: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#parentbased Signed-off-by: Tim Bart --- README.md | 1 + src/service_cmd/runner/runner.go | 2 +- src/settings/settings.go | 2 ++ src/trace/trace.go | 13 ++++++++++--- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 7ec4569f..1fd24af4 100644 --- a/README.md +++ b/README.md @@ -845,6 +845,7 @@ The following environment variables control the tracing feature: 1. `TRACING_SERVICE_NAMESPACE` - Controls the service namespace appears in tracing span. The default value is empty. 1. `TRACING_SERVICE_INSTANCE_ID` - Controls the service instance id appears in tracing span. It is recommended to put the pod name or container name in this field. The default value is a randomly generated version 4 uuid if unspecified. 1. 
Other fields in [OTLP Exporter Documentation](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md). These sections need to be correctly configured in order to enable the exporter to export spans to the correct destination. +1. `TRACING_SAMPLING_RATE` - Controls the sampling rate, defaults to 1, which means always sample. Valid range: 0.0-1.0. For high volume services, adjusting the sampling rate is recommended. # mTLS diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index c72c4982..74a8105e 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -78,7 +78,7 @@ func createLimiter(srv server.Server, s settings.Settings, localCache *freecache func (runner *Runner) Run() { s := runner.settings if s.TracingEnabled { - tp := trace.InitProductionTraceProvider(s.TracingExporterProtocol, s.TracingServiceName, s.TracingServiceNamespace, s.TracingServiceInstanceId) + tp := trace.InitProductionTraceProvider(s.TracingExporterProtocol, s.TracingServiceName, s.TracingServiceNamespace, s.TracingServiceInstanceId, s.TracingSamplingRate) defer func() { if err := tp.Shutdown(context.Background()); err != nil { logger.Printf("Error shutting down tracer provider: %v", err) diff --git a/src/settings/settings.go b/src/settings/settings.go index 0f8f501e..d0731511 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -135,6 +135,8 @@ type Settings struct { // can only be http or gRPC TracingExporterProtocol string `envconfig:"TRACING_EXPORTER_PROTOCOL" default:"http"` // detailed setting of exporter should refer to https://opentelemetry.io/docs/reference/specification/protocol/exporter/, e.g. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT + // TracingSamplingRate defaults to 1 which amounts to using the `AlwaysSample` sampler + TracingSamplingRate float64 `envconfig:"TRACING_SAMPLING_RATE" default:"1"` } type Option func(*Settings) diff --git a/src/trace/trace.go b/src/trace/trace.go index fed78e7a..6fa49b65 100644 --- a/src/trace/trace.go +++ b/src/trace/trace.go @@ -22,7 +22,7 @@ var ( testSpanExporterMu sync.Mutex ) -func InitProductionTraceProvider(protocol string, serviceName string, serviceNamespace string, serviceInstanceId string) *sdktrace.TracerProvider { +func InitProductionTraceProvider(protocol string, serviceName string, serviceNamespace string, serviceInstanceId string, samplingRate float64) *sdktrace.TracerProvider { client := createClient(protocol) exporter, err := otlptrace.New(context.Background(), client) if err != nil { @@ -50,14 +50,21 @@ func InitProductionTraceProvider(protocol string, serviceName string, serviceNam if err != nil { logger.Fatal(err) } + // respect the parent span's sampling decision when a parent exists, + // otherwise sample according to the configured sampling rate + // if samplingRate >= 1, the AlwaysSample sampler is used + // if samplingRate <= 0, the NeverSample sampler is used + sampler := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(samplingRate)) + tp := sdktrace.NewTracerProvider( - sdktrace.WithSampler(sdktrace.AlwaysSample()), + sdktrace.WithSampler(sampler), sdktrace.WithBatcher(exporter), sdktrace.WithResource(resource), ) otel.SetTracerProvider(tp) otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) - logger.Infof("TracerProvider initialized with following parameters: protocol: %s, serviceName: %s, serviceNamespace: %s, serviceInstanceId:
%s", protocol, serviceName, serviceNamespace, useServiceInstanceId) + logger.Infof("TracerProvider initialized with following parameters: protocol: %s, serviceName: %s, serviceNamespace: %s, serviceInstanceId: %s, samplingRate: %f", + protocol, serviceName, serviceNamespace, useServiceInstanceId, samplingRate) return tp } From bc3eca49d4a3c1bd601d2e543c306ea942808277 Mon Sep 17 00:00:00 2001 From: Renuka Piyumal Fernando Date: Sat, 5 Nov 2022 03:52:14 +0530 Subject: [PATCH 048/181] Fix data race issue in globalShadowMode variable (#370) globalShadowMode is written when config is reloaded and read in here, which may lead to data race condition Signed-off-by: Renuka Fernando --- src/service/ratelimit.go | 14 +++++++------- src/service_cmd/runner/runner.go | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 621a83df..902c8084 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -32,7 +32,7 @@ var tracer = otel.Tracer("ratelimit") type RateLimitServiceServer interface { pb.RateLimitServiceServer - GetCurrentConfig() config.RateLimitConfig + GetCurrentConfig() (config.RateLimitConfig, bool) } type service struct { @@ -107,8 +107,7 @@ func checkServiceErr(something bool, msg string) { } } -func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx context.Context) ([]*config.RateLimit, []bool) { - snappedConfig := this.GetCurrentConfig() +func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx context.Context, snappedConfig config.RateLimitConfig) ([]*config.RateLimit, []bool) { checkServiceErr(snappedConfig != nil, "no rate limit configuration loaded") limitsToCheck := make([]*config.RateLimit, len(request.Descriptors)) @@ -180,7 +179,8 @@ func (this *service) shouldRateLimitWorker( checkServiceErr(request.Domain != "", "rate limit domain must not be empty") checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty") - limitsToCheck, isUnlimited := this.constructLimitsToCheck(request, ctx) + snappedConfig, globalShadowMode := this.GetCurrentConfig() + limitsToCheck, isUnlimited := this.constructLimitsToCheck(request, ctx, snappedConfig) responseDescriptorStatuses := this.cache.DoLimit(ctx, request, limitsToCheck) assert.Assert(len(limitsToCheck) == len(responseDescriptorStatuses)) @@ -228,7 +228,7 @@ func (this *service) shouldRateLimitWorker( } // If there is a global shadow_mode, it should always return OK - if finalCode == pb.RateLimitResponse_OVER_LIMIT && this.globalShadowMode { + if finalCode == pb.RateLimitResponse_OVER_LIMIT && globalShadowMode { finalCode = pb.RateLimitResponse_OK this.stats.GlobalShadowMode.Inc() } @@ -306,10 +306,10 @@ func (this *service) ShouldRateLimit( return response, nil } -func (this *service) GetCurrentConfig() config.RateLimitConfig { +func (this *service) GetCurrentConfig() (config.RateLimitConfig, bool) { this.configLock.RLock() defer this.configLock.RUnlock() - return this.config + return this.config, this.globalShadowMode } func NewService(runtime loader.IFace, cache limiter.RateLimitCache, diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 74a8105e..605aed0e 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -130,7 +130,7 @@ func (runner *Runner) Run() { "/rlconfig", "print out the currently loaded configuration for debugging", func(writer http.ResponseWriter, request *http.Request) { - if current := 
service.GetCurrentConfig(); current != nil { + if current, _ := service.GetCurrentConfig(); current != nil { io.WriteString(writer, current.Dump()) } }) From a091203eb9b28165a5e49215dfab5d6a096327c6 Mon Sep 17 00:00:00 2001 From: Arko Dasgupta Date: Tue, 15 Nov 2022 12:05:51 -0800 Subject: [PATCH 049/181] Use _imaps._tcp.gmail.com in srv tests (#375) * Seeing test failures in CI with the current SRV entry `_xmpp-server._tcp.gmail.com.` https://github.com/envoyproxy/ratelimit/actions/runs/3466737811/jobs/5802374477#step:3:257 * What is odd is that I also tried `_xmpp-server._tcp.google.com.` which is used in the upstream `net.LookupSRV` tests https://cs.opensource.google/go/go/+/master:src/net/lookup_test.go;l=47;drc=2b59307ac21135ab8db58e08fb98211fbedbb10d?q=LookupSRV&ss=go%2Fgo but that failed as well Signed-off-by: Arko Dasgupta --- test/srv/srv_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/test/srv/srv_test.go b/test/srv/srv_test.go index e5e273bf..3d0d3ab5 100644 --- a/test/srv/srv_test.go +++ b/test/srv/srv_test.go @@ -49,12 +49,11 @@ func TestServerStringsFromSevWhenSrvIsWellFormedButNotLookupable(t *testing.T) { } func TestServerStrings(t *testing.T) { - // it seems reasonable to think _xmpp-server._tcp.gmail.com will be available for a long time! srvResolver := srv.DnsSrvResolver{} - servers, err := srvResolver.ServerStringsFromSrv("_xmpp-server._tcp.gmail.com.") + servers, err := srvResolver.ServerStringsFromSrv("_imaps._tcp.gmail.com.") + assert.Nil(t, err) assert.True(t, len(servers) > 0) for _, s := range servers { - assert.Regexp(t, `^.*xmpp-server.*google.com.:\d+$`, s) + assert.Regexp(t, `^.*imap.*gmail.com.:\d+$`, s) } - assert.Nil(t, err) } From 3cb4326e68577b99bcf5387950dc05620080e90a Mon Sep 17 00:00:00 2001 From: Arko Dasgupta Date: Tue, 15 Nov 2022 12:13:54 -0800 Subject: [PATCH 050/181] Expose Config structures outside package (#374) * Allow downstream projects to reuse the config structure and generate the rate limit service config programmatically w/o creating their own custom structures. Signed-off-by: Arko Dasgupta --- src/config/config_impl.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/config/config_impl.go b/src/config/config_impl.go index aa440400..806c6411 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -17,7 +17,7 @@ type yamlReplaces struct { Name string } -type yamlRateLimit struct { +type YamlRateLimit struct { RequestsPerUnit uint32 `yaml:"requests_per_unit"` Unit string Unlimited bool `yaml:"unlimited"` @@ -25,17 +25,17 @@ type yamlRateLimit struct { Replaces []yamlReplaces } -type yamlDescriptor struct { +type YamlDescriptor struct { Key string Value string - RateLimit *yamlRateLimit `yaml:"rate_limit"` - Descriptors []yamlDescriptor + RateLimit *YamlRateLimit `yaml:"rate_limit"` + Descriptors []YamlDescriptor ShadowMode bool `yaml:"shadow_mode"` } -type yamlRoot struct { +type YamlRoot struct { Domain string - Descriptors []yamlDescriptor + Descriptors []YamlDescriptor } type rateLimitDescriptor struct { @@ -116,7 +116,7 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit // @param parentKey supplies the fully resolved key name that owns this config level. // @param descriptors supplies the YAML descriptors to load. // @param statsManager that owns the stats.Scope.
-func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, statsManager stats.Manager) { +func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []YamlDescriptor, statsManager stats.Manager) { for _, descriptorConfig := range descriptors { if descriptorConfig.Key == "" { panic(newRateLimitConfigError(config, "descriptor has empty key")) @@ -245,7 +245,7 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) { } validateYamlKeys(config, any) - var root yamlRoot + var root YamlRoot err = yaml.Unmarshal([]byte(config.FileBytes), &root) if err != nil { errorText := fmt.Sprintf("error loading config file: %s", err.Error()) From 5e9a43f9f57e0aaca83501fb200abc009a4f3624 Mon Sep 17 00:00:00 2001 From: Renuka Piyumal Fernando Date: Mon, 12 Dec 2022 22:17:56 +0530 Subject: [PATCH 051/181] Upgrade golang.org/x/text to 0.3.8 (#382) Ref: https://github.com/golang/go/issues/56152 Signed-off-by: Renuka Fernando --- go.mod | 2 +- go.sum | 12 ++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 6036fa96..d816fc68 100644 --- a/go.mod +++ b/go.mod @@ -55,5 +55,5 @@ require ( go.opentelemetry.io/otel/trace v1.7.0 go.opentelemetry.io/proto/otlp v0.16.0 // indirect golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/text v0.3.8 // indirect ) diff --git a/go.sum b/go.sum index ce127e13..f787b414 100644 --- a/go.sum +++ b/go.sum @@ -36,12 +36,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= -github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis/v2 v2.11.4 h1:GsuyeunTx7EllZBU3/6Ji3dhMQZDpC9rLf1luJ+6M5M= -github.com/alicebob/miniredis/v2 v2.11.4/go.mod h1:VL3UDEfAH59bSa7MuHMuFToxkqyHh69s/WUbYlOAuyg= github.com/alicebob/miniredis/v2 v2.23.0 h1:+lwAJYjvvdIVg6doFHuotFjueJ/7KY10xo/vm3X3Scw= github.com/alicebob/miniredis/v2 v2.23.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -123,8 +119,6 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3 h1:6amM4HsNPOvMLVc2ZnyqrjeQ92YAVWn7T4WBKK87inY= -github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -204,8 +198,6 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0= -github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -365,8 +357,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 5f3f5a4cf573a1fbf0a56e1e7e73381705aeb77f Mon Sep 17 00:00:00 2001 From: Pratyush Singhal Date: Tue, 10 Jan 2023 00:45:24 +0530 Subject: [PATCH 052/181] chore: bump golang.org/x/net dep for cve-2022-41717 fix (#391) Signed-off-by: Pratyush Singhal --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index d816fc68..6506dba4 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.6.0 github.com/stretchr/testify v1.7.1 - golang.org/x/net v0.0.0-20220909164309-bea034e7d591 + golang.org/x/net v0.4.0 google.golang.org/grpc v1.45.0 gopkg.in/yaml.v2 v2.3.0 ) @@ -54,6 +54,6 @@ require ( go.opentelemetry.io/otel/sdk v1.7.0 go.opentelemetry.io/otel/trace v1.7.0 go.opentelemetry.io/proto/otlp v0.16.0 // indirect - golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect - golang.org/x/text v0.3.8 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect ) diff --git a/go.sum b/go.sum index f787b414..79af9ed7 100644 --- a/go.sum +++ b/go.sum @@ -296,8 +296,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -348,8 +348,8 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -357,8 +357,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From f28024e33b82c18f1bf0128780b14fac2c126890 Mon Sep 17 00:00:00 2001 From: jespersoderlund Date: Tue, 17 Jan 2023 21:42:23 +0100 Subject: [PATCH 053/181] Include value in metrics for unspecified value (#389) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jesper Söderlund --- README.md | 48 +++++++++++++++++++ examples/envoy/proxy.yaml | 9 ++++ examples/ratelimit/config/example.yaml | 5 ++ ...-included-in-stats-key-when-unspecified.sh | 38 +++++++++++++++ src/config/config.go | 15 +++--- src/config/config_impl.go | 31 +++++++----- test/config/basic_config.yaml | 8 ++++ 
test/config/config_test.go | 36 ++++++++++++++ test/limiter/base_limiter_test.go | 16 +++---- test/memcached/cache_impl_test.go | 36 +++++++------- test/redis/bench_test.go | 2 +- test/redis/fixed_cache_impl_test.go | 30 ++++++------ test/service/ratelimit_test.go | 24 +++++----- 13 files changed, 226 insertions(+), 72 deletions(-) create mode 100755 integration-test/scripts/value-included-in-stats-key-when-unspecified.sh diff --git a/README.md b/README.md index 1fd24af4..ace942c3 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ - [Rate limit definition](#rate-limit-definition) - [Replaces](#replaces) - [ShadowMode](#shadowmode) + - [Including detailed metrics for unspecified values](#including-detailed-metrics-for-unspecified-values) - [Examples](#examples) - [Example 1](#example-1) - [Example 2](#example-2) @@ -24,6 +25,7 @@ - [Example 5](#example-5) - [Example 6](#example-6) - [Example 7](#example-7) + - [Example 8](#example-8) - [Loading Configuration](#loading-configuration) - [Log Format](#log-format) - [GRPC Keepalive](#grpc-keepalive) @@ -205,6 +207,7 @@ descriptors: unit: requests_per_unit: shadow_mode: (optional) + detailed_metric: (optional) descriptors: (optional block) - ... (nested repetition of above) ``` @@ -253,6 +256,12 @@ An additional statistic is added to keep track of how many times a key with "sha There is also a Global Shadow Mode +### Including detailed metrics for unspecified values + +Setting `detailed_metric: true` for a descriptor extends the metrics that are produced. Normally, a descriptor that matches a value not explicitly listed in the configuration is, from a metrics point of view, rolled up into the base entry. This can be problematic if you want those details available for analysis. + +NB! This should only be enabled when the potentially large metric cardinality it can lead to is acceptable. + ### Examples #### Example 1 @@ -490,6 +499,43 @@ descriptors: unit: second ``` +#### Example 8 + +This example demonstrates how a descriptor without a specified value can be configured to override the default behavior and include the matched value in the metrics. + +Rate limiting configuration and tracking work as normal: + +``` +(key_1, unspecified_value): 10 / min +(key_1, unspecified_value2): 10 / min +(key_1, value_1): 20 / min +``` + +```yaml +domain: example8 +descriptors: + - key: key1 + detailed_metric: true + rate_limit: + unit: minute + requests_per_unit: 10 + - key: key1 + value: value1 + rate_limit: + unit: minute + requests_per_unit: 20 +``` + +The metric keys will be the following: + +"key1_unspecified_value" +"key1_unspecified_value2" +"key1_value1" + +rather than the default +"key1" +"key1_value1" + ## Loading Configuration The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors @@ -623,6 +669,8 @@ KEY_VALUE: - A combination of the key value - Nested descriptors would be suffixed in the stats path +By default, the value part is omitted if the matching rule is a descriptor without a value. Specifying the `detailed_metric` configuration parameter changes this behavior and creates a unique metric even in this situation. + STAT: - near_limit: Number of rule hits over the NearLimit ratio threshold (currently 80%) but under the threshold rate.
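To make the resulting stat names concrete, here is a hedged Go sketch written in the style of the project's config tests shown further below in this patch; `loadFile`, `mockstats`, the surrounding imports, and the hypothetical `example8.yaml` fixture (holding the Example 8 YAML) are all assumed from that test harness:

```go
// Sketch: with detailed_metric: true, a request value that is not listed
// in the config gets its own stat key instead of rolling up into "key1".
statsStore := stats.NewStore(stats.NewNullSink(), false)
rlConfig := config.NewRateLimitConfigImpl(
	loadFile("example8.yaml"), // hypothetical fixture containing the YAML above
	mockstats.NewMockStatManager(statsStore), false)

rl := rlConfig.GetLimit(
	nil, "example8",
	&pb_struct.RateLimitDescriptor{
		Entries: []*pb_struct.RateLimitDescriptor_Entry{
			{Key: "key1", Value: "unspecified_value"},
		},
	})
rl.Stats.TotalHits.Inc()
// The hit is now counted under "example8.key1_unspecified_value.total_hits"
// rather than "example8.key1.total_hits".
```

Without `detailed_metric: true`, the same lookup would increment the rolled-up `example8.key1.total_hits` counter instead.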
diff --git a/examples/envoy/proxy.yaml b/examples/envoy/proxy.yaml index a0e2d1e9..a4a6a87f 100644 --- a/examples/envoy/proxy.yaml +++ b/examples/envoy/proxy.yaml @@ -149,3 +149,12 @@ static_resources: - request_headers: header_name: "category" descriptor_key: "category" + - match: + prefix: /unspec + route: + cluster: mock + rate_limits: + - actions: + - request_headers: + header_name: "unspec" + descriptor_key: "unspec" diff --git a/examples/ratelimit/config/example.yaml b/examples/ratelimit/config/example.yaml index 52cc8e42..1c3d0c19 100644 --- a/examples/ratelimit/config/example.yaml +++ b/examples/ratelimit/config/example.yaml @@ -76,6 +76,11 @@ descriptors: - key: bay rate_limit: unlimited: true + - key: unspec + detailed_metric: true + rate_limit: + unit: minute + requests_per_unit: 2 - key: qux rate_limit: unlimited: true diff --git a/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh b/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh new file mode 100755 index 00000000..a3851cef --- /dev/null +++ b/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# +# descriptor: (unspec: *) +# Has rate limit quota 2 req / min +# detailed_metric is true +# + +response=$(curl -f -s -H "unspec: unspecified_value" http://envoy-proxy:8888/unspec) +response=$(curl -f -s -H "unspec: unspecified_value" http://envoy-proxy:8888/unspec) + +# This should be successful +if [ $? -ne 0 ]; then + echo "These should not be rate limited" + exit 1 +fi + +# This one should be rate limited +response=$(curl -f -s -H "unspec: unspecified_value" http://envoy-proxy:8888/unspec) + +if [ $? -eq 0 ]; then + echo "This should be a rate limited call" + exit 1 +fi + +# Sleep a bit to allow the stats to be propagated +sleep 2 + +# Extract the metric for the unspecified value, which should be there due to the "detailed_metric" +stats=$(curl -f -s statsd:9102/metrics | grep -e ratelimit_service_rate_limit_over_limit | grep unspec_unspecified_value | cut -d} -f2 | sed 's/ //g') + +echo "Length: ${#stats}" +echo "${stats}" + +if [ "${stats}" != "1" ]; then + echo "Overlimit should be 1" + exit 1 +fi diff --git a/src/config/config.go b/src/config/config.go index 7a22734d..0dd6d803 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -17,13 +17,14 @@ func (e RateLimitConfigError) Error() string { // Wrapper for an individual rate limit config entry which includes the defined limit and stats. type RateLimit struct { - FullKey string - Stats stats.RateLimitStats - Limit *pb.RateLimitResponse_RateLimit - Unlimited bool - ShadowMode bool - Name string - Replaces []string + FullKey string + Stats stats.RateLimitStats + Limit *pb.RateLimitResponse_RateLimit + Unlimited bool + ShadowMode bool + Name string + Replaces []string + IncludeValueInMetricWhenNotSpecified bool } // Interface for interacting with a loaded rate limit config.
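The `config_impl.go` diff that follows implements the per-value metric by cloning the matched limit with a value-scoped stats key. Roughly — with `matched`, `entryValue`, and `statsManager` as illustrative stand-ins for the variables inside `GetLimit` — the clone looks like this:

```go
// Sketch of the detailed_metric path in GetLimit: reuse the matched rule's
// quota, but attach stats scoped to "<full_key>_<request_value>".
perValueStats := statsManager.NewStats(matched.FullKey + "_" + entryValue)
detailed := config.NewRateLimit(
	matched.Limit.RequestsPerUnit, // same quota as the base rule
	matched.Limit.Unit,
	perValueStats, // e.g. "test-domain.key7_unspecified_value"
	matched.Unlimited,
	matched.ShadowMode,
	matched.Name,
	matched.Replaces,
	false, // the clone is already value-specific, so the flag is not set again
)
```

Passing `false` for the final flag (presumably) prevents the value-specific clone from being expanded a second time on a later lookup.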
diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 806c6411..aa8cef9f 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -26,11 +26,12 @@ type YamlRateLimit struct { } type YamlDescriptor struct { - Key string - Value string - RateLimit *YamlRateLimit `yaml:"rate_limit"` - Descriptors []YamlDescriptor - ShadowMode bool `yaml:"shadow_mode"` + Key string + Value string + RateLimit *YamlRateLimit `yaml:"rate_limit"` + Descriptors []YamlDescriptor + ShadowMode bool `yaml:"shadow_mode"` + IncludeMetricsForUnspecifiedValue bool `yaml:"detailed_metric"` } type YamlRoot struct { @@ -65,6 +66,7 @@ var validKeys = map[string]bool{ "shadow_mode": true, "name": true, "replaces": true, + "detailed_metric": true, } // Create a new rate limit config entry. @@ -74,7 +76,7 @@ var validKeys = map[string]bool{ // @param unlimited supplies whether the rate limit is unlimited // @return the new config entry. func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, - unlimited bool, shadowMode bool, name string, replaces []string) *RateLimit { + unlimited bool, shadowMode bool, name string, replaces []string, includeValueInMetricWhenNotSpecified bool) *RateLimit { return &RateLimit{ FullKey: rlStats.GetKey(), @@ -83,10 +85,11 @@ func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Un RequestsPerUnit: requestsPerUnit, Unit: unit, }, - Unlimited: unlimited, - ShadowMode: shadowMode, - Name: name, - Replaces: replaces, + Unlimited: unlimited, + ShadowMode: shadowMode, + Name: name, + Replaces: replaces, + IncludeValueInMetricWhenNotSpecified: includeValueInMetricWhenNotSpecified, } } @@ -163,7 +166,7 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p rateLimit = NewRateLimit( descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey), unlimited, descriptorConfig.ShadowMode, - descriptorConfig.RateLimit.Name, replaces, + descriptorConfig.RateLimit.Name, replaces, descriptorConfig.IncludeMetricsForUnspecifiedValue, ) rateLimitDebugString = fmt.Sprintf( " ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t, shadow_mode=%t}", rateLimit.Limit.RequestsPerUnit, @@ -306,6 +309,7 @@ func (this *rateLimitConfigImpl) GetLimit( false, "", []string{}, + false, ) return rateLimit } @@ -325,6 +329,7 @@ func (this *rateLimitConfigImpl) GetLimit( if nextDescriptor != nil && nextDescriptor.limit != nil { logger.Debugf("found rate limit: %s", finalKey) + if i == len(descriptor.Entries)-1 { rateLimit = nextDescriptor.limit } else { @@ -336,6 +341,10 @@ func (this *rateLimitConfigImpl) GetLimit( logger.Debugf("iterating to next level") descriptorsMap = nextDescriptor.descriptors } else { + if rateLimit != nil && rateLimit.IncludeValueInMetricWhenNotSpecified { + rateLimit = NewRateLimit(rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit, this.statsManager.NewStats(rateLimit.FullKey+"_"+entry.Value), rateLimit.Unlimited, rateLimit.ShadowMode, rateLimit.Name, rateLimit.Replaces, false) + } + break } } diff --git a/test/config/basic_config.yaml b/test/config/basic_config.yaml index 33772366..1ce7c9af 100644 --- a/test/config/basic_config.yaml +++ b/test/config/basic_config.yaml @@ -60,3 +60,11 @@ descriptors: - key: key6 rate_limit: unlimited: true + + # Top level key only with default rate limit. 
+ + - key: key7 + detailed_metric: true + rate_limit: + unit: minute + requests_per_unit: 70 diff --git a/test/config/config_test.go b/test/config/config_test.go index 2b4cb0a7..be2ab934 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -177,6 +177,42 @@ func TestBasicConfig(t *testing.T) { assert.True(rl.Unlimited) assert.EqualValues(1, stats.NewCounter("test-domain.key6.total_hits").Value()) assert.EqualValues(1, stats.NewCounter("test-domain.key6.within_limit").Value()) + + // A value for the key with detailed_metric: true + // should also generate a stat with the value included + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key7", Value: "unspecified_value"}}, + }) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + rl.Stats.WithinLimit.Inc() + assert.EqualValues(70, rl.Limit.RequestsPerUnit) + assert.Equal(pb.RateLimitResponse_RateLimit_MINUTE, rl.Limit.Unit) + assert.EqualValues(1, stats.NewCounter("test-domain.key7_unspecified_value.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key7_unspecified_value.over_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key7_unspecified_value.near_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key7_unspecified_value.within_limit").Value()) + + // Another value for the key with detailed_metric: true + // should also generate a stat with the value included + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key7", Value: "another_value"}}, + }) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + rl.Stats.WithinLimit.Inc() + assert.EqualValues(70, rl.Limit.RequestsPerUnit) + assert.Equal(pb.RateLimitResponse_RateLimit_MINUTE, rl.Limit.Unit) + assert.EqualValues(1, stats.NewCounter("test-domain.key7_another_value.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key7_another_value.over_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key7_another_value.near_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key7_another_value.within_limit").Value()) } func TestDomainMerge(t *testing.T) { diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index c37ad5e3..b3babcee 100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -29,7 +29,7 @@ func TestGenerateCacheKeys(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -48,7 +48,7 @@ func TestGenerateCacheKeysPrefix(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm) request := 
common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) assert.Equal(1, len(cacheKeys)) @@ -102,7 +102,7 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) @@ -125,7 +125,7 @@ func TestGetResponseStatusOverLimitWithLocalCacheShadowMode(t *testing.T) { sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) // This limit is in ShadowMode - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. 
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) @@ -149,7 +149,7 @@ func TestGetResponseStatusOverLimit(t *testing.T) { localCache := freecache.NewCache(100) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) @@ -175,7 +175,7 @@ func TestGetResponseStatusOverLimitShadowMode(t *testing.T) { sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) // Key is in shadow_mode: true - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) @@ -197,7 +197,7 @@ func TestGetResponseStatusBelowLimit(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) @@ -218,7 +218,7 @@ func TestGetResponseStatusBelowLimitShadowMode(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index d70346ce..14022ecb 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -53,7 +53,7 @@ func TestMemcached(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", 
[][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -77,7 +77,7 @@ func TestMemcached(t *testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, "", nil, false), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -111,8 +111,8 @@ func TestMemcached(t *testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil, false), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -150,7 +150,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -168,7 +168,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value1_1234", uint64(1)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value1"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -241,7 +241,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, 
sm.NewStats("key4_value4"), false, false, "", nil, false), } assert.Equal( @@ -342,7 +342,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), } assert.Equal( @@ -399,7 +399,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key5_value5_1234", uint64(3)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -417,7 +417,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key6_value6_1234", uint64(2)).Return(uint64(7), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -435,7 +435,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key7_value7_1234", uint64(3)).Return(uint64(19), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -453,7 +453,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key8_value8_1234", uint64(3)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -471,7 +471,7 @@ func TestNearLimit(t *testing.T) { 
client.EXPECT().Increment("domain_key9_value9_1234", uint64(7)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -489,7 +489,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key10_value10_1234", uint64(3)).Return(uint64(30), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -533,7 +533,7 @@ func TestMemcacheWithJitter(t *testing.T) { ).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -576,7 +576,7 @@ func TestMemcacheAdd(t *testing.T) { uint64(2), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -600,7 +600,7 @@ func TestMemcacheAdd(t *testing.T) { ).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -673,7 +673,7 @@ func TestMemcachedTracer(t *testing.T) { 
client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} cache.DoLimit(context.Background(), request, limits) diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 28a98719..bc8a9eee 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -49,7 +49,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} // wait for the pool to fill up for { diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index a4900cba..eb025ab5 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -69,7 +69,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -94,7 +94,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, "", nil, false), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -124,8 +124,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { {{"key3", "value3"}, {"subkey3", "subvalue3"}}, }, 1) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil, false), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -204,7 +204,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := 
common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), } assert.Equal( @@ -304,7 +304,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), } assert.Equal( @@ -360,7 +360,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -377,7 +377,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -394,7 +394,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -411,7 +411,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, 
DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -428,7 +428,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -445,7 +445,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, "", nil)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -475,7 +475,7 @@ func TestRedisWithJitter(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -510,7 +510,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true, "", nil), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true, "", nil, false), } assert.Equal( @@ -615,7 +615,7 @@ func TestRedisTracer(t *testing.T) { client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} cache.DoLimit(context.Background(), request, limits) spanStubs := testSpanExporter.GetSpans() diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index faddbbdb..64c95568 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -145,7 +145,7 @@ func TestService(test *testing.T) { request = common.NewRateLimitRequest( "different-domain", 
[][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -181,7 +181,7 @@ func TestService(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -234,7 +234,7 @@ func TestServiceGlobalShadowMode(test *testing.T) { // Global Shadow mode limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -274,8 +274,8 @@ func TestRuleShadowMode(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil, false), } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -307,8 +307,8 @@ func TestMixedRuleShadowMode(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -366,7 +366,7 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) { request := 
common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -418,7 +418,7 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -478,7 +478,7 @@ func TestCacheError(test *testing.T) { service := t.setupBasicService() request := common.NewRateLimitRequest("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false)} t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(context.Background(), request, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { @@ -522,9 +522,9 @@ func TestUnlimited(test *testing.T) { request := common.NewRateLimitRequest( "some-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}, {{"baz", "qux"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false, false, "", nil), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false, false, "", nil, false), nil, - config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false, "", nil), + config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false, "", nil, false), } t.config.EXPECT().GetLimit(context.Background(), "some-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "some-domain", request.Descriptors[1]).Return(limits[1]) From 1a686864369fa7b15d2c638ea641ad321bba9024 Mon Sep 17 00:00:00 2001 From: Renuka Piyumal Fernando Date: Tue, 7 Feb 2023 23:27:50 +0530 Subject: [PATCH 054/181] Dynamic config update via xDS Management Server (#373) Signed-off-by: Renuka Fernando --- README.md | 75 +++- .../config/ratelimit/v3/rls_conf.proto | 104 ++++++ docker-compose-example.yml | 16 + examples/xds-sotw-config-server/Dockerfile | 11 + examples/xds-sotw-config-server/README.md | 17 + examples/xds-sotw-config-server/go.mod | 20 ++ examples/xds-sotw-config-server/go.sum | 73 ++++ examples/xds-sotw-config-server/logger.go | 27 ++ examples/xds-sotw-config-server/main/main.go | 54 +++ examples/xds-sotw-config-server/resource.go | 170 +++++++++ examples/xds-sotw-config-server/server.go | 
49 +++ go.mod | 26 +- go.sum | 77 ++-- src/config/config.go | 4 +- src/config/config_impl.go | 81 +++-- src/config/config_xds.go | 49 +++ src/config_check_cmd/main.go | 3 +- src/provider/file_provider.go | 118 +++++++ src/provider/provider.go | 29 ++ src/provider/xds_grpc_sotw_provider.go | 182 ++++++++++ src/server/server.go | 7 +- src/server/server_impl.go | 53 +-- src/service/ratelimit.go | 77 ++-- src/service_cmd/runner/runner.go | 6 +- src/settings/settings.go | 37 ++ test/common/xds_sotw.go | 70 ++++ test/config/config_test.go | 3 +- test/integration/integration_test.go | 89 +++-- test/integration/xds_sotw_integration_test.go | 182 ++++++++++ test/mocks/provider/provider.go | 101 ++++++ test/provider/xds_grpc_sotw_provider_test.go | 330 ++++++++++++++++++ test/service/ratelimit_test.go | 114 +++--- 32 files changed, 2005 insertions(+), 249 deletions(-) create mode 100644 api/ratelimit/config/ratelimit/v3/rls_conf.proto create mode 100644 examples/xds-sotw-config-server/Dockerfile create mode 100644 examples/xds-sotw-config-server/README.md create mode 100644 examples/xds-sotw-config-server/go.mod create mode 100644 examples/xds-sotw-config-server/go.sum create mode 100644 examples/xds-sotw-config-server/logger.go create mode 100644 examples/xds-sotw-config-server/main/main.go create mode 100644 examples/xds-sotw-config-server/resource.go create mode 100644 examples/xds-sotw-config-server/server.go create mode 100644 src/config/config_xds.go create mode 100644 src/provider/file_provider.go create mode 100644 src/provider/provider.go create mode 100644 src/provider/xds_grpc_sotw_provider.go create mode 100644 test/common/xds_sotw.go create mode 100644 test/integration/xds_sotw_integration_test.go create mode 100644 test/mocks/provider/provider.go create mode 100644 test/provider/xds_grpc_sotw_provider_test.go diff --git a/README.md b/README.md index ace942c3..c114de1a 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,8 @@ - [API Deprecation History](#api-deprecation-history) - [Building and Testing](#building-and-testing) - [Docker-compose setup](#docker-compose-setup) - - [Full test environment](#full-test-environment) + - [Full test environment - Configure rate limits through files](#full-test-environment---configure-rate-limits-through-files) + - [Full test environment - Configure rate limits through an xDS Management Server](#full-test-environment---configure-rate-limits-through-an-xds-management-server) - [Self-contained end-to-end integration test](#self-contained-end-to-end-integration-test) - [Configuration](#configuration) - [The configuration format](#the-configuration-format) @@ -27,6 +28,8 @@ - [Example 7](#example-7) - [Example 8](#example-8) - [Loading Configuration](#loading-configuration) + - [File Based Configuration Loading](#file-based-configuration-loading) + - [xDS Management Server Based Configuration Loading](#xds-management-server-based-configuration-loading) - [Log Format](#log-format) - [GRPC Keepalive](#grpc-keepalive) - [Request Fields](#request-fields) @@ -131,11 +134,12 @@ If you want to run with [two redis instances](#two-redis-instances), you will ne the docker-compose.yml file to run a second redis container, and change the environment variables as explained in the [two redis instances](#two-redis-instances) section. 
-## Full test environment +## Full test environment - Configure rate limits through files To run a fully configured environment to demo Envoy based rate limiting, run: ```bash +export CONFIG_TYPE=FILE docker-compose -f docker-compose-example.yml up --build --remove-orphans ``` @@ -162,6 +166,36 @@ To see the metrics in the example curl http://localhost:9102/metrics | grep -i shadow ``` +## Full test environment - Configure rate limits through an xDS Management Server + +To run a fully configured environment to demo Envoy based rate limiting, run: + +```bash +export CONFIG_TYPE=GRPC_XDS_SOTW +docker-compose -f docker-compose-example.yml --profile xds-config up --build --remove-orphans +``` + +This will run the `xds-config` docker-compose profile, which runs an example xDS server, ratelimit, redis, prom-statsd-exporter and two Envoy containers, so that you can demo rate limiting by hitting the endpoints below. + +```bash +curl localhost:8888/test +curl localhost:8888/header -H "foo: foo" # Header based +curl localhost:8888/twoheader -H "foo: foo" -H "bar: bar" # Two headers +curl localhost:8888/twoheader -H "foo: foo" -H "baz: baz" # This will be rate limited +curl localhost:8888/twoheader -H "foo: foo" -H "bar: banned" # Ban a particular header value +curl localhost:8888/twoheader -H "foo: foo" -H "baz: shady" # This will never be rate limited since "baz" with value "shady" is in shadow_mode +curl localhost:8888/twoheader -H "foo: foo" -H "baz: not-so-shady" # This is subject to rate limiting because it's not in shadow_mode +``` + +Edit [`examples/xds-sotw-config-server/resource.go`](examples/xds-sotw-config-server/resource.go) to test different rate limit configs. + +To see the metrics in the example + +```bash +# The metrics for the shadow_mode keys +curl http://localhost:9102/metrics | grep -i shadow +``` + ## Self-contained end-to-end integration test Integration tests are coded as bash scripts in `integration-test/scripts`. @@ -538,6 +572,18 @@ rather than the normal ## Loading Configuration +The Rate limit service supports the following configuration loading methods. You can define which method to use by configuring the environment variable `CONFIG_TYPE`. + +| Config Loading Method | Value for Environment Variable `CONFIG_TYPE` | +| -------------------------------------------------------------------------------------------------------- | -------------------------------------------- | +| [File Based Configuration Loading](#file-based-configuration-loading) | `FILE` (Default) | +| [xDS Management Server Based Configuration Loading](#xds-management-server-based-configuration-loading) | `GRPC_XDS_SOTW` | + +When the environment variable `FORCE_START_WITHOUT_INITIAL_CONFIG` is set to `false`, the Rate limit service will wait for the initial rate limit configuration before +starting the server (gRPC and REST server endpoints). When set to `true`, the server will start even without an initial configuration. + +### File Based Configuration Loading + The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors a designated path, and watches for symlink swaps to files in the directory tree to reload configuration files. @@ -571,6 +617,31 @@ For more information on how runtime works you can read its [README](https://gith By default it is not possible to define multiple configuration files within `RUNTIME_SUBDIRECTORY` referencing the same domain. To enable this behavior set `MERGE_DOMAIN_CONFIG` to `true`.
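As an illustration of the selection these settings imply, here is a minimal Go sketch that maps `CONFIG_TYPE` to a configuration provider. It is a standalone toy under the assumptions documented above, not the service's actual settings wiring.

```go
package main

import (
	"fmt"
	"os"
)

// pickProvider sketches the choice implied by the documented CONFIG_TYPE
// values; the real service resolves this inside its settings and provider
// packages rather than with a switch like this.
func pickProvider() (string, error) {
	switch t := os.Getenv("CONFIG_TYPE"); t {
	case "", "FILE": // FILE is the documented default
		return "file provider (goruntime directory watcher)", nil
	case "GRPC_XDS_SOTW":
		return "xDS SotW gRPC provider", nil
	default:
		return "", fmt.Errorf("unsupported CONFIG_TYPE %q", t)
	}
}

func main() {
	provider, err := pickProvider()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("loading configuration via", provider)
}
```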
+### xDS Management Server Based Configuration Loading + +xDS Management Server is a gRPC server which implements the [Aggregated Discovery Service (ADS)](https://github.com/envoyproxy/data-plane-api/blob/97b6dae39046f7da1331a4dc57830d20e842fc26/envoy/service/discovery/v3/ads.proto). +The xDS Management Server serves [Discovery Responses](https://github.com/envoyproxy/data-plane-api/blob/97b6dae39046f7da1331a4dc57830d20e842fc26/envoy/service/discovery/v3/discovery.proto#L69) carrying [Ratelimit Configuration Resources](api/ratelimit/config/ratelimit/v3/rls_conf.proto) +with the Type URL `"type.googleapis.com/ratelimit.config.ratelimit.v3.RateLimitConfig"`. +The xDS client in the Rate limit service then configures the service with the provided configuration. +For more information on the xDS protocol, please refer to the [Envoy proxy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol). + +You can refer to [the sample xDS configuration management server](examples/xds-sotw-config-server/README.md). + +The xDS server to connect to for configuration can be set via the [settings](https://github.com/envoyproxy/ratelimit/blob/master/src/settings/settings.go) +package with the following environment variables: + +``` +CONFIG_GRPC_XDS_NODE_ID default:"default" +CONFIG_GRPC_XDS_SERVER_URL default:"localhost:18000" +CONFIG_GRPC_XDS_SERVER_CONNECT_RETRY_INTERVAL default:"3s" +``` + +Ratelimit also supports TLS connections, which can be configured using the following environment variables: + +1. `CONFIG_GRPC_XDS_SERVER_USE_TLS`: set to `"true"` to enable a TLS connection with the xDS configuration management server. +2. `CONFIG_GRPC_XDS_CLIENT_TLS_CERT`, `CONFIG_GRPC_XDS_CLIENT_TLS_KEY`, and `CONFIG_GRPC_XDS_SERVER_TLS_CACERT` to provide the files that specify the TLS connection configuration to the xDS configuration management server. +3. `CONFIG_GRPC_XDS_SERVER_TLS_SAN`: (Optional) Override the SAN value to validate from the server certificate. + ## Log Format A centralized log collection system works better with logs in json format. JSON format avoids the need for custom parsing rules. diff --git a/api/ratelimit/config/ratelimit/v3/rls_conf.proto b/api/ratelimit/config/ratelimit/v3/rls_conf.proto new file mode 100644 index 00000000..cdb1836f --- /dev/null +++ b/api/ratelimit/config/ratelimit/v3/rls_conf.proto @@ -0,0 +1,104 @@ +syntax = "proto3"; + +package ratelimit.config.ratelimit.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +option java_package = "io.envoyproxy.ratelimit.config.ratelimit.v3"; +option java_outer_classname = "RlsConfigProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3;ratelimitv3"; + +// [#protodoc-title: Rate limit service configuration] +// A management server which supports ADS (Aggregated Discovery Service - SotW or delta protocol) can apply +// rate limit service configuration using the message type RateLimitConfig. The ADS client within the rate limit service +// will stream Discovery Requests with the resource type URL "type.googleapis.com/ratelimit.config.ratelimit.v3.RateLimitConfig". +// The ADS management server should respond with a stream of Discovery Responses with the same type URL and an array of RateLimitConfigs +// within the resources of each Discovery Response. + +// Rate limit configuration for a single domain. +message RateLimitConfig { + // Name of the rate limit configuration. This should be unique for each configuration. + string name = 1; + + // Domain name for the rate limit configuration.
+ string domain = 2; + + // List of rate limit configuration descriptors. + repeated RateLimitDescriptor descriptors = 3; +} + +// Rate limit configuration descriptor. +message RateLimitDescriptor { + // Key of the descriptor. + string key = 1; + + // Optional value of the descriptor. + string value = 2; + + // Rate limit policy of the descriptor. + RateLimitPolicy rate_limit = 3; + + // List of sub rate limit descriptors. + repeated RateLimitDescriptor descriptors = 4; + + // Mark the descriptor as shadow. When the value is true, the rate limit service allows requests to the backend. + bool shadow_mode = 5; +} + +// Rate-limit policy. +message RateLimitPolicy { + // Unit of time for the rate limit. + RateLimitUnit unit = 1; + + // Number of requests allowed in the policy within `unit` time. + uint32 requests_per_unit = 2; + + // Mark the rate limit policy as unlimited. All requests are allowed to the backend. + bool unlimited = 3; + + // Optional name for the rate limit policy. Name the policy if it should be replaced (dropped evaluation) by + // another policy. + string name = 4; + + // List of rate limit policies that this rate limit policy will replace (drop evaluation of). + // For more information: https://github.com/envoyproxy/ratelimit/tree/0b2f4d5fb04bf55e1873e2c5e2bb28da67c0643f#replaces + // Example: https://github.com/envoyproxy/ratelimit/tree/0b2f4d5fb04bf55e1873e2c5e2bb28da67c0643f#example-7 + repeated RateLimitReplace replaces = 5; +} + +// Replace specifies the rate limit policy that should be replaced (dropped evaluation). +// For more information: https://github.com/envoyproxy/ratelimit/tree/0b2f4d5fb04bf55e1873e2c5e2bb28da67c0643f#replaces +message RateLimitReplace { + // Name of the rate limit policy that is being replaced (dropped evaluation). + string name = 1; +} + +// Identifies the unit of time for the rate limit. +enum RateLimitUnit { + // The time unit is not known. + UNKNOWN = 0; + + // The time unit representing a second. + SECOND = 1; + + // The time unit representing a minute. + MINUTE = 2; + + // The time unit representing an hour. + HOUR = 3; + + // The time unit representing a day. + DAY = 4; +} + +// [#protodoc-title: Rate Limit Config Discovery Service (RLS Conf DS)] + +// Returns the list of all rate limit configs that the rate limit service should be configured with.
+service RateLimitConfigDiscoveryService { + rpc StreamRlsConfigs(stream envoy.service.discovery.v3.DiscoveryRequest) + returns (stream envoy.service.discovery.v3.DiscoveryResponse) { + } + + rpc FetchRlsConfigs(envoy.service.discovery.v3.DiscoveryRequest) + returns (envoy.service.discovery.v3.DiscoveryResponse) { + } +} diff --git a/docker-compose-example.yml b/docker-compose-example.yml index ba4cc69e..242a5011 100644 --- a/docker-compose-example.yml +++ b/docker-compose-example.yml @@ -49,6 +49,22 @@ services: - RUNTIME_ROOT=/data - RUNTIME_SUBDIRECTORY=ratelimit - RUNTIME_WATCH_ROOT=false + - CONFIG_TYPE=${CONFIG_TYPE:-FILE} + - CONFIG_GRPC_XDS_NODE_ID=test-node-id + - CONFIG_GRPC_XDS_SERVER_URL=ratelimit-xds-config-server:18000 + + ratelimit-xds-config-server: + image: ratelimit-xds-config-server:latest + build: + context: examples/xds-sotw-config-server + dockerfile: Dockerfile + command: ["-nodeID", "test-node-id", "-port", "18000", "-debug", "true"] + expose: + - 18000 + networks: + - ratelimit-network + profiles: + - xds-config envoy-proxy: image: envoyproxy/envoy-dev:latest diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile new file mode 100644 index 00000000..52439213 --- /dev/null +++ b/examples/xds-sotw-config-server/Dockerfile @@ -0,0 +1,11 @@ +FROM golang:1.18 AS build +WORKDIR /xds-server + +COPY . . + +RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/xds-server -v main/main.go + +FROM alpine:3.16 AS final +RUN apk --no-cache add ca-certificates && apk --no-cache update +COPY --from=build /go/bin/xds-server /bin/xds-server +ENTRYPOINT [ "/bin/xds-server" ] diff --git a/examples/xds-sotw-config-server/README.md b/examples/xds-sotw-config-server/README.md new file mode 100644 index 00000000..4586cf14 --- /dev/null +++ b/examples/xds-sotw-config-server/README.md @@ -0,0 +1,17 @@ +# Example Rate-limit Configuration SotW xDS Server + +This is an example of a trivial xDS V3 control plane server similar to the example server in [go-control-plane](https://github.com/envoyproxy/go-control-plane/tree/main/internal/example). It serves a sample rate limit configuration. You can run the example using the project top-level docker-compose-example.yml, e.g.: + +```bash +export CONFIG_TYPE=GRPC_XDS_SOTW +docker-compose -f docker-compose-example.yml --profile xds-config up --build --remove-orphans +``` + +The docker-compose setup builds and runs the example server along with the Rate limit server. The example server serves a configuration defined in [`resource.go`](resource.go). If everything works correctly, you can follow the [examples in the project top-level README.md file](../../README.md#examples). + +## Files + +- [main/main.go](main/main.go) is the example program entrypoint. It instantiates the cache and xDS server and runs the xDS server process. +- [resource.go](resource.go) generates a `Snapshot` structure which describes the configuration that the xDS server serves to the rate limit service. +- [server.go](server.go) runs the xDS control plane server. +- [logger.go](logger.go) is the logger.
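For orientation, a snapshot like the one `examples/xds-sotw-config-server/resource.go` generates is built from the Go types generated from the proto above. The following standalone sketch constructs one such `RateLimitConfig` resource; the domain, key and quota are illustrative values, not the ones the example actually ships.

```go
package main

import (
	"fmt"

	rls_config "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3"
)

// sampleRateLimitConfig builds one RateLimitConfig resource of the shape
// described by rls_conf.proto: a named config for a single domain with one
// descriptor limited to 5 requests per minute.
func sampleRateLimitConfig() *rls_config.RateLimitConfig {
	return &rls_config.RateLimitConfig{
		Name:   "rl-config-example",
		Domain: "rl-test-domain",
		Descriptors: []*rls_config.RateLimitDescriptor{
			{
				Key: "foo",
				RateLimit: &rls_config.RateLimitPolicy{
					Unit:            rls_config.RateLimitUnit_MINUTE,
					RequestsPerUnit: 5,
				},
			},
		},
	}
}

func main() {
	fmt.Println(sampleRateLimitConfig())
}
```

A management server would place resources of this type into its snapshot and serve them under the Type URL `"type.googleapis.com/ratelimit.config.ratelimit.v3.RateLimitConfig"` documented above.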
diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod new file mode 100644 index 00000000..7c5ac4cd --- /dev/null +++ b/examples/xds-sotw-config-server/go.mod @@ -0,0 +1,20 @@ +module github.com/envoyproxy/ratelimit/examples/xds-sotw-config-server + +go 1.18 + +require ( + github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f + google.golang.org/grpc v1.52.0 +) + +require ( + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect + github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect + github.com/golang/protobuf v1.5.2 // indirect + golang.org/x/net v0.4.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + google.golang.org/protobuf v1.28.1 // indirect +) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum new file mode 100644 index 00000000..d82360b9 --- /dev/null +++ b/examples/xds-sotw-config-server/go.sum @@ -0,0 +1,73 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f h1:nqACgqiYlDnB0znidh+8uhnQVLeqfW5NyyRfnGibowc= +github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto 
v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk=
+google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/examples/xds-sotw-config-server/logger.go b/examples/xds-sotw-config-server/logger.go
new file mode 100644
index 00000000..bbfeadde
--- /dev/null
+++ b/examples/xds-sotw-config-server/logger.go
@@ -0,0 +1,27 @@
+package example
+
+import (
+	"log"
+)
+
+type Logger struct {
+	Debug bool
+}
+
+func (logger Logger) Debugf(format string, args ...interface{}) {
+	if logger.Debug {
+		log.Printf("[DEBUG] "+format+"\n", args...)
+	}
+}
+
+func (logger Logger) Infof(format string, args ...interface{}) {
+	log.Printf("[INFO] "+format+"\n", args...)
+}
+
+func (logger Logger) Warnf(format string, args ...interface{}) {
+	log.Printf("[WARN] "+format+"\n", args...)
+}
+
+func (logger Logger) Errorf(format string, args ...interface{}) {
+	log.Printf("[ERROR] "+format+"\n", args...)
+} diff --git a/examples/xds-sotw-config-server/main/main.go b/examples/xds-sotw-config-server/main/main.go new file mode 100644 index 00000000..82da545f --- /dev/null +++ b/examples/xds-sotw-config-server/main/main.go @@ -0,0 +1,54 @@ +package main + +import ( + "context" + "flag" + "os" + + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/server/v3" + "github.com/envoyproxy/go-control-plane/pkg/test/v3" + + example "github.com/envoyproxy/ratelimit/examples/xds-sotw-config-server" +) + +var ( + logger example.Logger + port uint + nodeID string +) + +func init() { + logger = example.Logger{} + + flag.BoolVar(&logger.Debug, "debug", false, "Enable xDS server debug logging") + flag.UintVar(&port, "port", 18000, "xDS management server port") + flag.StringVar(&nodeID, "nodeID", "test-node-id", "Node ID") +} + +func main() { + flag.Parse() + + // Create a cache + cache := cache.NewSnapshotCache(false, cache.IDHash{}, logger) + + // Create the snapshot that we'll serve to Envoy + snapshot := example.GenerateSnapshot() + if err := snapshot.Consistent(); err != nil { + logger.Errorf("Snapshot is inconsistent: %+v\n%+v", snapshot, err) + os.Exit(1) + } + logger.Debugf("Will serve snapshot %+v", snapshot) + + // Add the snapshot to the cache + if err := cache.SetSnapshot(context.Background(), nodeID, snapshot); err != nil { + logger.Errorf("Snapshot error %q for %+v", err, snapshot) + os.Exit(1) + } + + // Run the xDS server + ctx := context.Background() + cb := &test.Callbacks{Debug: logger.Debug} + srv := server.NewServer(ctx, cache, cb) + example.RunServer(ctx, srv, port) +} diff --git a/examples/xds-sotw-config-server/resource.go b/examples/xds-sotw-config-server/resource.go new file mode 100644 index 00000000..71df6324 --- /dev/null +++ b/examples/xds-sotw-config-server/resource.go @@ -0,0 +1,170 @@ +package example + +import ( + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + rls_config "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3" +) + +func makeRlsConfig() []types.Resource { + return []types.Resource{ + &rls_config.RateLimitConfig{ + Name: "mongo_cps", + Domain: "mongo_cps", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "database", + Value: "users", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 500, + }, + }, + { + Key: "database", + Value: "default", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 500, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "rl", + Domain: "rl", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "category", + Value: "account", + RateLimit: &rls_config.RateLimitPolicy{ + Replaces: []*rls_config.RateLimitReplace{{Name: "bkthomps"}, {Name: "fake_name"}}, + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 4, + }, + }, + { + Key: "source_cluster", + Value: "proxy", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "destination_cluster", + Value: "bkthomps", + RateLimit: &rls_config.RateLimitPolicy{ + Replaces: []*rls_config.RateLimitReplace{{Name: "bkthomps"}}, + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 2, + }, + }, + { + Key: "destination_cluster", + Value: "mock", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 1, + }, + }, + { + Key: 
"destination_cluster", + Value: "override", + RateLimit: &rls_config.RateLimitPolicy{ + Replaces: []*rls_config.RateLimitReplace{{Name: "banned_limit"}}, + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 2, + }, + }, + { + Key: "destination_cluster", + Value: "fake", + RateLimit: &rls_config.RateLimitPolicy{ + Name: "fake_name", + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 2, + }, + }, + }, + }, + { + Key: "foo", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 2, + }, + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "bar", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 3, + }, + }, + { + Key: "bar", + Value: "bkthomps", + RateLimit: &rls_config.RateLimitPolicy{ + Name: "bkthomps", + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 1, + }, + }, + { + Key: "bar", + Value: "banned", + RateLimit: &rls_config.RateLimitPolicy{ + Name: "banned_limit", + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 0, + }, + }, + { + Key: "baz", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 1, + }, + }, + { + Key: "baz", + Value: "not-so-shady", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 3, + }, + }, + { + Key: "baz", + Value: "shady", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 3, + }, + ShadowMode: true, + }, + { + Key: "bay", + RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + }, + }, + }, + { + Key: "qux", + RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + }, + }, + }, + } +} + +func GenerateSnapshot() *cache.Snapshot { + snap, _ := cache.NewSnapshot("1", + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: makeRlsConfig(), + }, + ) + return snap +} diff --git a/examples/xds-sotw-config-server/server.go b/examples/xds-sotw-config-server/server.go new file mode 100644 index 00000000..0e120b58 --- /dev/null +++ b/examples/xds-sotw-config-server/server.go @@ -0,0 +1,49 @@ +package example + +import ( + "context" + "fmt" + "log" + "net" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + + discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/envoyproxy/go-control-plane/pkg/server/v3" +) + +const ( + grpcKeepaliveTime = 30 * time.Second + grpcKeepaliveTimeout = 5 * time.Second + grpcKeepaliveMinTime = 30 * time.Second + grpcMaxConcurrentStreams = 1000000 +) + +// RunServer starts an xDS server at the given port. 
+func RunServer(ctx context.Context, srv server.Server, port uint) { + grpcServer := grpc.NewServer( + grpc.MaxConcurrentStreams(grpcMaxConcurrentStreams), + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: grpcKeepaliveTime, + Timeout: grpcKeepaliveTimeout, + }), + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: grpcKeepaliveMinTime, + PermitWithoutStream: true, + }), + ) + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) + if err != nil { + log.Fatal(err) + } + + discovery.RegisterAggregatedDiscoveryServiceServer(grpcServer, srv) + + log.Printf("Management server listening on %d\n", port) + if err = grpcServer.Serve(lis); err != nil { + log.Println(err) + } +} diff --git a/go.mod b/go.mod index 6506dba4..3ed66726 100644 --- a/go.mod +++ b/go.mod @@ -6,45 +6,47 @@ require ( github.com/alicebob/miniredis/v2 v2.23.0 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b github.com/coocood/freecache v1.1.0 - github.com/envoyproxy/go-control-plane v0.10.1 + github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f github.com/golang/mock v1.4.4 github.com/golang/protobuf v1.5.2 github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/kavu/go_reuseport v1.2.0 github.com/kelseyhightower/envconfig v1.4.0 github.com/lyft/goruntime v0.3.0 github.com/lyft/gostats v0.4.1 github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.6.0 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.8.1 golang.org/x/net v0.4.0 - google.golang.org/grpc v1.45.0 + google.golang.org/grpc v1.52.0 + google.golang.org/protobuf v1.28.1 gopkg.in/yaml.v2 v2.3.0 ) require ( github.com/cenkalti/backoff/v4 v4.1.2 // indirect - github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect - google.golang.org/protobuf v1.28.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) require ( github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/google/uuid v1.3.0 github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 go.opentelemetry.io/otel v1.7.0 @@ -53,7 +55,7 @@ require ( 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.3 go.opentelemetry.io/otel/sdk v1.7.0 go.opentelemetry.io/otel/trace v1.7.0 - go.opentelemetry.io/proto/otlp v0.16.0 // indirect + go.opentelemetry.io/proto/otlp v0.19.0 // indirect golang.org/x/sys v0.3.0 // indirect golang.org/x/text v0.5.0 // indirect ) diff --git a/go.sum b/go.sum index 79af9ed7..efe276bd 100644 --- a/go.sum +++ b/go.sum @@ -12,14 +12,16 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -46,6 +48,8 @@ github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQ github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -59,9 +63,9 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go 
v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA= github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -73,21 +77,26 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYAymNJek76W3mgw= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f h1:nqACgqiYlDnB0znidh+8uhnQVLeqfW5NyyRfnGibowc= +github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod 
h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -130,8 +139,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -149,9 +158,12 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -161,9 +173,12 @@ github.com/kavu/go_reuseport v1.2.0 h1:YO+pt6m5Z3WkVH9DjaDJzoSS/0FO2Q8x3CfObxk/i github.com/kavu/go_reuseport v1.2.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -175,28 +190,35 @@ github.com/lyft/gostats v0.4.1 h1:oR6p4HRCGxt0nUntmZIWmYMgyothBi3eZH2A71vRjsc= github.com/lyft/gostats v0.4.1/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M= github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= @@ -228,10 +250,13 @@ go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJ go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.16.0 h1:WHzDWdXUvbc5bG2ObdrGfaNpQz7ft7QN9HHmJlbiB1E= -go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -303,8 +328,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -313,6 +338,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -344,7 +370,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -399,15 +424,18 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -429,8 +457,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= 
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -452,6 +480,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -461,8 +490,9 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -479,8 +509,9 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -494,8 +525,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -504,8 +536,9 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/src/config/config.go b/src/config/config.go
index 0dd6d803..86f941ef 100644
--- a/src/config/config.go
+++ b/src/config/config.go
@@ -42,8 +42,8 @@ type RateLimitConfig interface {
 
 // Information for a config file to load into the aggregate config.
 type RateLimitConfigToLoad struct {
-	Name      string
-	FileBytes string
+	Name       string
+	ConfigYaml *YamlRoot
 }
 
 // Interface for loading a configuration from a list of YAML files.
diff --git a/src/config/config_impl.go b/src/config/config_impl.go
index aa8cef9f..2c9dcbcb 100644
--- a/src/config/config_impl.go
+++ b/src/config/config_impl.go
@@ -110,8 +110,8 @@ func (this *rateLimitDescriptor) dump() string {
 
 // Create a new config error which includes the owning file.
-// @param config supplies the config file that generated the error.
+// @param name supplies the name of the config file that generated the error.
 // @param err supplies the error string.
-func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimitConfigError {
-	return RateLimitConfigError(fmt.Sprintf("%s: %s", config.Name, err))
+func newRateLimitConfigError(name string, err string) RateLimitConfigError {
+	return RateLimitConfigError(fmt.Sprintf("%s: %s", name, err))
 }
 
 // Load a set of config descriptors from the YAML file and check the input.
@@ -122,7 +122,7 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit
 func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []YamlDescriptor, statsManager stats.Manager) {
 	for _, descriptorConfig := range descriptors {
 		if descriptorConfig.Key == "" {
-			panic(newRateLimitConfigError(config, "descriptor has empty key"))
+			panic(newRateLimitConfigError(config.Name, "descriptor has empty key"))
 		}
 
 		// Value is optional, so the final key for the map is either the key only or key_value.
@@ -134,7 +134,7 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
 		newParentKey := parentKey + finalKey
 		if _, present := this.descriptors[finalKey]; present {
 			panic(newRateLimitConfigError(
-				config, fmt.Sprintf("duplicate descriptor composite key '%s'", newParentKey)))
+				config.Name, fmt.Sprintf("duplicate descriptor composite key '%s'", newParentKey)))
 		}
 
 		var rateLimit *RateLimit = nil
@@ -149,12 +149,12 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
 			if unlimited {
 				if validUnit {
 					panic(newRateLimitConfigError(
-						config,
+						config.Name,
 						fmt.Sprintf("should not specify rate limit unit when unlimited")))
 				}
 			} else if !validUnit {
 				panic(newRateLimitConfigError(
-					config,
+					config.Name,
 					fmt.Sprintf("invalid rate limit unit '%s'", descriptorConfig.RateLimit.Unit)))
 			}
 
@@ -174,10 +174,10 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
 
 			for _, replaces := range descriptorConfig.RateLimit.Replaces {
 				if replaces.Name == "" {
-					panic(newRateLimitConfigError(config, "should not have an empty replaces entry"))
+					panic(newRateLimitConfigError(config.Name, "should not have an empty replaces entry"))
 				}
 				if replaces.Name == descriptorConfig.RateLimit.Name {
-					panic(newRateLimitConfigError(config, "replaces should not contain name of same descriptor"))
+					panic(newRateLimitConfigError(config.Name, "replaces should not contain name of same descriptor"))
 				}
 			}
 		}
@@ -193,17 +193,17 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
 // Validate a YAML config file's keys.
-// @param config specifies the file contents to load.
-// @param any specifies the yaml file and a map.
+// @param fileName specifies the name of the config file.
+// @param config_map specifies the generic YAML map holding the file's keys.
-func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]interface{}) {
+func validateYamlKeys(fileName string, config_map map[interface{}]interface{}) {
 	for k, v := range config_map {
 		if _, ok := k.(string); !ok {
 			errorText := fmt.Sprintf("config error, key is not of type string: %v", k)
 			logger.Debugf(errorText)
-			panic(newRateLimitConfigError(config, errorText))
+			panic(newRateLimitConfigError(fileName, errorText))
 		}
 		if _, ok := validKeys[k.(string)]; !ok {
 			errorText := fmt.Sprintf("config error, unknown key '%s'", k)
 			logger.Debugf(errorText)
-			panic(newRateLimitConfigError(config, errorText))
+			panic(newRateLimitConfigError(fileName, errorText))
 		}
 		switch v := v.(type) {
 		case []interface{}:
@@ -211,13 +211,13 @@ func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]i
 				if _, ok := e.(map[interface{}]interface{}); !ok {
 					errorText := fmt.Sprintf("config error, yaml file contains list of type other than map: %v", e)
 					logger.Debugf(errorText)
-					panic(newRateLimitConfigError(config, errorText))
+					panic(newRateLimitConfigError(fileName, errorText))
 				}
 				element := e.(map[interface{}]interface{})
-				validateYamlKeys(config, element)
+				validateYamlKeys(fileName, element)
 			}
 		case map[interface{}]interface{}:
-			validateYamlKeys(config, v)
+			validateYamlKeys(fileName, v)
 		// string is a leaf type in ratelimit config. No need to keep validating.
 		case string:
 		// int is a leaf type in ratelimit config. No need to keep validating.
@@ -230,40 +230,24 @@ func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]i
 		default:
 			errorText := fmt.Sprintf("error checking config")
 			logger.Debugf(errorText)
-			panic(newRateLimitConfigError(config, errorText))
+			panic(newRateLimitConfigError(fileName, errorText))
 		}
 	}
 }
 
-// Load a single YAML config file into the global config.
-// @param config specifies the file contents to load.
+// Load a single YAML config into the global config.
+// @param config specifies the parsed YamlRoot struct to load.
 func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) {
-	// validate keys in config with generic map
-	any := map[interface{}]interface{}{}
-	err := yaml.Unmarshal([]byte(config.FileBytes), &any)
-	if err != nil {
-		errorText := fmt.Sprintf("error loading config file: %s", err.Error())
-		logger.Debugf(errorText)
-		panic(newRateLimitConfigError(config, errorText))
-	}
-	validateYamlKeys(config, any)
-
-	var root YamlRoot
-	err = yaml.Unmarshal([]byte(config.FileBytes), &root)
-	if err != nil {
-		errorText := fmt.Sprintf("error loading config file: %s", err.Error())
-		logger.Debugf(errorText)
-		panic(newRateLimitConfigError(config, errorText))
-	}
+	root := config.ConfigYaml
 
 	if root.Domain == "" {
-		panic(newRateLimitConfigError(config, "config file cannot have empty domain"))
+		panic(newRateLimitConfigError(config.Name, "config file cannot have empty domain"))
 	}
 
 	if _, present := this.domains[root.Domain]; present {
 		if !this.mergeDomainConfigs {
 			panic(newRateLimitConfigError(
-				config, fmt.Sprintf("duplicate domain '%s' in config file", root.Domain)))
+				config.Name, fmt.Sprintf("duplicate domain '%s' in config file", root.Domain)))
 		}
 
 		logger.Debugf("patching domain: %s", root.Domain)
@@ -366,6 +350,31 @@ func descriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) str
 	return domain + "." + rateLimitKey
 }
 
+// ConfigFileContentToYaml converts a single YAML file's string content into a YamlRoot struct, validating the YAML keys.
+// @param fileName specifies the name of the file.
+// @param content specifies the string content of the yaml file. +func ConfigFileContentToYaml(fileName, content string) *YamlRoot { + // validate keys in config with generic map + any := map[interface{}]interface{}{} + err := yaml.Unmarshal([]byte(content), &any) + if err != nil { + errorText := fmt.Sprintf("error loading config file: %s", err.Error()) + logger.Debugf(errorText) + panic(newRateLimitConfigError(fileName, errorText)) + } + validateYamlKeys(fileName, any) + + var root YamlRoot + err = yaml.Unmarshal([]byte(content), &root) + if err != nil { + errorText := fmt.Sprintf("error loading config file: %s", err.Error()) + logger.Debugf(errorText) + panic(newRateLimitConfigError(fileName, errorText)) + } + + return &root +} + // Create rate limit config from a list of input YAML files. // @param configs specifies a list of YAML files to load. // @param stats supplies the stats scope to use for limit stats during runtime. diff --git a/src/config/config_xds.go b/src/config/config_xds.go new file mode 100644 index 00000000..1e772c36 --- /dev/null +++ b/src/config/config_xds.go @@ -0,0 +1,49 @@ +package config + +import ( + rls_conf_v3 "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3" +) + +// ConfigXdsProtoToYaml converts Xds Proto format to yamlRoot +func ConfigXdsProtoToYaml(xdsProto *rls_conf_v3.RateLimitConfig) *YamlRoot { + return &YamlRoot{ + Domain: xdsProto.Domain, + Descriptors: rateLimitDescriptorsPbToYaml(xdsProto.Descriptors), + } +} + +func rateLimitDescriptorsPbToYaml(pb []*rls_conf_v3.RateLimitDescriptor) []YamlDescriptor { + descriptors := make([]YamlDescriptor, len(pb)) + for i, d := range pb { + descriptors[i] = YamlDescriptor{ + Key: d.Key, + Value: d.Value, + RateLimit: rateLimitPolicyPbToYaml(d.RateLimit), + Descriptors: rateLimitDescriptorsPbToYaml(d.Descriptors), + ShadowMode: d.ShadowMode, + } + } + + return descriptors +} + +func rateLimitPolicyPbToYaml(pb *rls_conf_v3.RateLimitPolicy) *YamlRateLimit { + if pb == nil { + return nil + } + return &YamlRateLimit{ + RequestsPerUnit: pb.RequestsPerUnit, + Unit: pb.Unit.String(), + Unlimited: pb.Unlimited, + Name: pb.Name, + Replaces: rateLimitReplacesPbToYaml(pb.Replaces), + } +} + +func rateLimitReplacesPbToYaml(pb []*rls_conf_v3.RateLimitReplace) []yamlReplaces { + replaces := make([]yamlReplaces, len(pb)) + for i, r := range pb { + replaces[i] = yamlReplaces{Name: r.Name} + } + return replaces +} diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index 750af791..dc313c31 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -51,7 +51,8 @@ func main() { fmt.Printf("error reading file %s: %s\n", finalPath, err.Error()) os.Exit(1) } - allConfigs = append(allConfigs, config.RateLimitConfigToLoad{finalPath, string(bytes)}) + configYaml := config.ConfigFileContentToYaml(finalPath, string(bytes)) + allConfigs = append(allConfigs, config.RateLimitConfigToLoad{Name: finalPath, ConfigYaml: configYaml}) } loadConfigs(allConfigs, *mergeDomainConfigs) diff --git a/src/provider/file_provider.go b/src/provider/file_provider.go new file mode 100644 index 00000000..07bfa11a --- /dev/null +++ b/src/provider/file_provider.go @@ -0,0 +1,118 @@ +package provider + +import ( + "path/filepath" + "strings" + + "github.com/lyft/goruntime/loader" + gostats "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" + + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" +) + +type 
FileProvider struct {
+	settings              settings.Settings
+	loader                config.RateLimitConfigLoader
+	configUpdateEventChan chan ConfigUpdateEvent
+	runtime               loader.IFace
+	runtimeUpdateEvent    chan int
+	runtimeWatchRoot      bool
+	rootStore             gostats.Store
+	statsManager          stats.Manager
+}
+
+func (p *FileProvider) ConfigUpdateEvent() <-chan ConfigUpdateEvent {
+	return p.configUpdateEventChan
+}
+
+func (p *FileProvider) Stop() {}
+
+func (p *FileProvider) watch() {
+	p.runtime.AddUpdateCallback(p.runtimeUpdateEvent)
+
+	go func() {
+		p.sendEvent()
+		// No exit: the file provider keeps watching for runtime updates for the lifetime of the process.
+		for {
+			logger.Debugf("waiting for runtime update")
+			<-p.runtimeUpdateEvent
+			logger.Debugf("got runtime update and reloading config")
+			p.sendEvent()
+		}
+	}()
+}
+
+func (p *FileProvider) sendEvent() {
+	defer func() {
+		if e := recover(); e != nil {
+			p.configUpdateEventChan <- &ConfigUpdateEventImpl{err: e}
+		}
+	}()
+
+	files := []config.RateLimitConfigToLoad{}
+	snapshot := p.runtime.Snapshot()
+	for _, key := range snapshot.Keys() {
+		if p.runtimeWatchRoot && !strings.HasPrefix(key, "config.") {
+			continue
+		}
+
+		configYaml := config.ConfigFileContentToYaml(key, snapshot.Get(key))
+		files = append(files, config.RateLimitConfigToLoad{Name: key, ConfigYaml: configYaml})
+	}
+
+	rlSettings := settings.NewSettings()
+	newConfig := p.loader.Load(files, p.statsManager, rlSettings.MergeDomainConfigurations)
+
+	p.configUpdateEventChan <- &ConfigUpdateEventImpl{config: newConfig}
+}
+
+func (p *FileProvider) setupRuntime() {
+	loaderOpts := make([]loader.Option, 0, 1)
+	if p.settings.RuntimeIgnoreDotFiles {
+		loaderOpts = append(loaderOpts, loader.IgnoreDotFiles)
+	} else {
+		loaderOpts = append(loaderOpts, loader.AllowDotFiles)
+	}
+	var err error
+	if p.settings.RuntimeWatchRoot {
+		p.runtime, err = loader.New2(
+			p.settings.RuntimePath,
+			p.settings.RuntimeSubdirectory,
+			p.rootStore.ScopeWithTags("runtime", p.settings.ExtraTags),
+			&loader.SymlinkRefresher{RuntimePath: p.settings.RuntimePath},
+			loaderOpts...)
+	} else {
+		directoryRefresher := &loader.DirectoryRefresher{}
+		// Adding loader.Remove to the default set of goruntime's FileSystemOps.
+		directoryRefresher.WatchFileSystemOps(loader.Remove, loader.Write, loader.Create, loader.Chmod)
+
+		p.runtime, err = loader.New2(
+			filepath.Join(p.settings.RuntimePath, p.settings.RuntimeSubdirectory),
+			"config",
+			p.rootStore.ScopeWithTags("runtime", p.settings.ExtraTags),
+			directoryRefresher,
+			loaderOpts...)
+	}
+
+	if err != nil {
+		panic(err)
+	}
+}
+
+func NewFileProvider(settings settings.Settings, statsManager stats.Manager, rootStore gostats.Store) RateLimitConfigProvider {
+	p := &FileProvider{
+		settings:              settings,
+		loader:                config.NewRateLimitConfigLoaderImpl(),
+		configUpdateEventChan: make(chan ConfigUpdateEvent),
+		runtimeUpdateEvent:    make(chan int),
+		runtimeWatchRoot:      settings.RuntimeWatchRoot,
+		rootStore:             rootStore,
+		statsManager:          statsManager,
+	}
+	p.setupRuntime()
+	go p.watch()
+	return p
+}
diff --git a/src/provider/provider.go b/src/provider/provider.go
new file mode 100644
index 00000000..a4cade4c
--- /dev/null
+++ b/src/provider/provider.go
@@ -0,0 +1,29 @@
+package provider
+
+import (
+	"github.com/envoyproxy/ratelimit/src/config"
+)
+
+// RateLimitConfigProvider is the interface for configuration providers.
+type RateLimitConfigProvider interface {
+	// ConfigUpdateEvent returns a receive-only channel for retrieving configuration updates.
+	// The provider implementation should send a config update to this channel whenever it detects one;
+	// the config receiver waits on this channel for configuration updates.
+	ConfigUpdateEvent() <-chan ConfigUpdateEvent
+
+	// Stop stops the configuration provider's watch for configurations.
+	Stop()
+}
+
+type ConfigUpdateEvent interface {
+	GetConfig() (config config.RateLimitConfig, err any)
+}
+
+type ConfigUpdateEventImpl struct {
+	config config.RateLimitConfig
+	err    any
+}
+
+func (e *ConfigUpdateEventImpl) GetConfig() (config config.RateLimitConfig, err any) {
+	return e.config, e.err
+}
diff --git a/src/provider/xds_grpc_sotw_provider.go b/src/provider/xds_grpc_sotw_provider.go
new file mode 100644
index 00000000..3b891979
--- /dev/null
+++ b/src/provider/xds_grpc_sotw_provider.go
@@ -0,0 +1,182 @@
+package provider
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+	"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
+	"github.com/golang/protobuf/ptypes/any"
+	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
+	logger "github.com/sirupsen/logrus"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/anypb"
+	"google.golang.org/protobuf/types/known/structpb"
+
+	"github.com/envoyproxy/ratelimit/src/config"
+	"github.com/envoyproxy/ratelimit/src/settings"
+	"github.com/envoyproxy/ratelimit/src/stats"
+
+	"github.com/envoyproxy/go-control-plane/pkg/client/sotw/v3"
+	rls_conf_v3 "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3"
+)
+
+// XdsGrpcSotwProvider is the xDS provider which implements the `RateLimitConfigProvider` interface.
+type XdsGrpcSotwProvider struct {
+	settings              settings.Settings
+	loader                config.RateLimitConfigLoader
+	configUpdateEventChan chan ConfigUpdateEvent
+	statsManager          stats.Manager
+	ctx                   context.Context
+	adsClient             sotw.ADSClient
+	// connectionRetryChannel receives true when the gRPC connection needs to be re-established, and false to stop watching.
+	connectionRetryChannel chan bool
+}
+
+// NewXdsGrpcSotwProvider initializes the xDS client and returns the xDS provider.
+func NewXdsGrpcSotwProvider(settings settings.Settings, statsManager stats.Manager) RateLimitConfigProvider { + ctx := context.Background() + p := &XdsGrpcSotwProvider{ + settings: settings, + statsManager: statsManager, + ctx: ctx, + configUpdateEventChan: make(chan ConfigUpdateEvent), + connectionRetryChannel: make(chan bool), + loader: config.NewRateLimitConfigLoaderImpl(), + adsClient: sotw.NewADSClient(ctx, getClientNode(settings), resource.RateLimitConfigType), + } + go p.initXdsClient() + return p +} + +// ConfigUpdateEvent returns the provider's config update channel. +func (p *XdsGrpcSotwProvider) ConfigUpdateEvent() <-chan ConfigUpdateEvent { + return p.configUpdateEventChan +} + +func (p *XdsGrpcSotwProvider) Stop() { + p.connectionRetryChannel <- false +} + +func (p *XdsGrpcSotwProvider) initXdsClient() { + logger.Info("Starting xDS client connection for rate limit configurations") + conn := p.initializeAndWatch() + + for retryEvent := range p.connectionRetryChannel { + if conn != nil { + conn.Close() + } + if !retryEvent { // stop watching + logger.Info("Stopping xDS client watch for rate limit configurations") + break + } + conn = p.initializeAndWatch() + } +} + +func (p *XdsGrpcSotwProvider) initializeAndWatch() *grpc.ClientConn { + conn, err := p.getGrpcConnection() + if err != nil { + logger.Errorf("Error initializing gRPC connection to xDS Management Server: %s", err.Error()) + p.retryGrpcConn() + return nil + } + + logger.Info("Connection to xDS Management Server is successful") + p.adsClient.InitConnect(conn) + go p.watchConfigs() + return conn +} + +func (p *XdsGrpcSotwProvider) watchConfigs() { + for { + resp, err := p.adsClient.Fetch() + if err != nil { + logger.Errorf("Failed to receive configuration from xDS Management Server: %s", err.Error()) + if sotw.IsConnError(err) { + p.retryGrpcConn() + return + } + p.adsClient.Nack(err.Error()) + } else { + logger.Tracef("Response received from xDS Management Server: %v", resp) + p.sendConfigs(resp.Resources) + } + } +} + +func (p *XdsGrpcSotwProvider) getGrpcConnection() (*grpc.ClientConn, error) { + backOff := grpc_retry.BackoffLinearWithJitter(p.settings.ConfigGrpcXdsServerConnectRetryInterval, 0.5) + logger.Infof("Dialing xDS Management Server: '%s'", p.settings.ConfigGrpcXdsServerUrl) + return grpc.Dial( + p.settings.ConfigGrpcXdsServerUrl, + p.getGrpcTransportCredentials(), + grpc.WithBlock(), + grpc.WithStreamInterceptor( + grpc_retry.StreamClientInterceptor(grpc_retry.WithBackoff(backOff)), + )) +} + +func (p *XdsGrpcSotwProvider) getGrpcTransportCredentials() grpc.DialOption { + if !p.settings.ConfigGrpcXdsServerUseTls { + return grpc.WithTransportCredentials(insecure.NewCredentials()) + } + + configGrpcXdsTlsConfig := p.settings.ConfigGrpcXdsTlsConfig + if p.settings.ConfigGrpcXdsServerTlsSAN != "" { + logger.Infof("ServerName used for xDS Management Service hostname verification is %s", p.settings.ConfigGrpcXdsServerTlsSAN) + configGrpcXdsTlsConfig.ServerName = p.settings.ConfigGrpcXdsServerTlsSAN + } + return grpc.WithTransportCredentials(credentials.NewTLS(configGrpcXdsTlsConfig)) +} + +func (p *XdsGrpcSotwProvider) sendConfigs(resources []*any.Any) { + defer func() { + if e := recover(); e != nil { + p.configUpdateEventChan <- &ConfigUpdateEventImpl{err: e} + p.adsClient.Nack(fmt.Sprint(e)) + } + }() + + conf := make([]config.RateLimitConfigToLoad, 0, len(resources)) + for _, res := range resources { + confPb := &rls_conf_v3.RateLimitConfig{} + err := anypb.UnmarshalTo(res, confPb, proto.UnmarshalOptions{}) + if err != 
nil { + logger.Errorf("Error while unmarshalling config from xDS Management Server: %s", err.Error()) + p.adsClient.Nack(err.Error()) + return + } + + configYaml := config.ConfigXdsProtoToYaml(confPb) + conf = append(conf, config.RateLimitConfigToLoad{Name: confPb.Name, ConfigYaml: configYaml}) + } + rlSettings := settings.NewSettings() + rlsConf := p.loader.Load(conf, p.statsManager, rlSettings.MergeDomainConfigurations) + p.configUpdateEventChan <- &ConfigUpdateEventImpl{config: rlsConf} + p.adsClient.Ack() +} + +func (p *XdsGrpcSotwProvider) retryGrpcConn() { + p.connectionRetryChannel <- true +} + +func getClientNode(s settings.Settings) *corev3.Node { + // setting metadata for node + metadataMap := make(map[string]*structpb.Value) + for _, entry := range strings.Split(s.ConfigGrpcXdsNodeMetadata, ",") { + keyValPair := strings.SplitN(entry, "=", 2) + if len(keyValPair) == 2 { + metadataMap[keyValPair[0]] = structpb.NewStringValue(keyValPair[1]) + } + } + + return &corev3.Node{ + Id: s.ConfigGrpcXdsNodeId, + Metadata: &structpb.Struct{Fields: metadataMap}, + } +} diff --git a/src/server/server.go b/src/server/server.go index 46c8ea5d..aa812f8c 100644 --- a/src/server/server.go +++ b/src/server/server.go @@ -5,7 +5,8 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/lyft/goruntime/loader" + "github.com/envoyproxy/ratelimit/src/provider" + stats "github.com/lyft/gostats" "google.golang.org/grpc" ) @@ -35,9 +36,9 @@ type Server interface { GrpcServer() *grpc.Server /** - * Returns the runtime configuration for the server. + * Returns the configuration provider for the server. */ - Runtime() loader.IFace + Provider() provider.RateLimitConfigProvider /** * Stops serving the grpc port (for integration testing). 
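Taken together, the provider contract works like a subscription: an implementation pushes `ConfigUpdateEvent`s onto its channel, and a single consumer blocks on `ConfigUpdateEvent()` and unpacks each event with `GetConfig()`. A minimal consumer sketch under those assumptions (not the service's actual loop; applying the config is elided):

```go
package consumerexample

import (
	"log"

	"github.com/envoyproxy/ratelimit/src/provider"
)

// consumeUpdates sketches a RateLimitConfigProvider consumer. The providers
// in this patch never close their channel, so the loop runs for the life of
// the process, mirroring the goroutine the service itself starts.
func consumeUpdates(p provider.RateLimitConfigProvider) {
	for event := range p.ConfigUpdateEvent() {
		newConfig, err := event.GetConfig()
		if err != nil {
			// err has type `any` because providers recover() from panics
			// raised during config loading and forward the recovered value.
			log.Printf("config load failed: %v", err)
			continue
		}
		// Swap the new config into the serving path (locking elided).
		_ = newConfig
	}
}
```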
diff --git a/src/server/server_impl.go b/src/server/server_impl.go index d98fc5ad..ba704449 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -11,7 +11,6 @@ import ( "net/http/pprof" "os" "os/signal" - "path/filepath" "sort" "strconv" "sync" @@ -20,6 +19,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" + "github.com/envoyproxy/ratelimit/src/provider" "github.com/envoyproxy/ratelimit/src/stats" "github.com/coocood/freecache" @@ -59,6 +59,7 @@ type server struct { grpcServer *grpc.Server store gostats.Store scope gostats.Scope + provider provider.RateLimitConfigProvider runtime loader.IFace debugListener serverDebugListener httpServer *http.Server @@ -127,6 +128,18 @@ func NewJsonHandler(svc pb.RateLimitServiceServer) func(http.ResponseWriter, *ht } } +func getProviderImpl(s settings.Settings, statsManager stats.Manager, rootStore gostats.Store) provider.RateLimitConfigProvider { + switch s.ConfigType { + case "FILE": + return provider.NewFileProvider(s, statsManager, rootStore) + case "GRPC_XDS_SOTW": + return provider.NewXdsGrpcSotwProvider(s, statsManager) + default: + logger.Fatalf("Invalid setting for ConfigType: %s", s.ConfigType) + panic("This line should not be reachable") + } +} + func (server *server) AddJsonHandler(svc pb.RateLimitServiceServer) { server.router.HandleFunc("/json", NewJsonHandler(svc)) } @@ -184,8 +197,8 @@ func (server *server) Scope() gostats.Scope { return server.scope } -func (server *server) Runtime() loader.IFace { - return server.runtime +func (server *server) Provider() provider.RateLimitConfigProvider { + return server.provider } func NewServer(s settings.Settings, name string, statsManager stats.Manager, localCache *freecache.Cache, opts ...settings.Option) Server { @@ -234,37 +247,8 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) } - // setup runtime - loaderOpts := make([]loader.Option, 0, 1) - if s.RuntimeIgnoreDotFiles { - loaderOpts = append(loaderOpts, loader.IgnoreDotFiles) - } else { - loaderOpts = append(loaderOpts, loader.AllowDotFiles) - } - var err error - if s.RuntimeWatchRoot { - ret.runtime, err = loader.New2( - s.RuntimePath, - s.RuntimeSubdirectory, - ret.store.ScopeWithTags("runtime", s.ExtraTags), - &loader.SymlinkRefresher{RuntimePath: s.RuntimePath}, - loaderOpts...) - } else { - directoryRefresher := &loader.DirectoryRefresher{} - // Adding loader.Remove to the default set of goruntime's FileSystemOps. - directoryRefresher.WatchFileSystemOps(loader.Remove, loader.Write, loader.Create, loader.Chmod) - - ret.runtime, err = loader.New2( - filepath.Join(s.RuntimePath, s.RuntimeSubdirectory), - "config", - ret.store.ScopeWithTags("runtime", s.ExtraTags), - directoryRefresher, - loaderOpts...) 
- } - - if err != nil { - panic(err) - } + // setup config provider + ret.provider = getProviderImpl(s, statsManager, ret.store) // setup http router ret.router = mux.NewRouter() @@ -339,6 +323,7 @@ func (server *server) Stop() { if server.httpServer != nil { server.httpServer.Close() } + server.provider.Stop() } func (server *server) handleGracefulShutdown() { diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 902c8084..0299d3d3 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -18,13 +18,13 @@ import ( core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/lyft/goruntime/loader" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/provider" "github.com/envoyproxy/ratelimit/src/redis" ) @@ -36,14 +36,11 @@ type RateLimitServiceServer interface { } type service struct { - runtime loader.IFace configLock sync.RWMutex - configLoader config.RateLimitConfigLoader + configUpdateEvent <-chan provider.ConfigUpdateEvent config config.RateLimitConfig - runtimeUpdateEvent chan int cache limiter.RateLimitCache stats stats.ServiceStats - runtimeWatchRoot bool customHeadersEnabled bool customHeaderLimitHeader string customHeaderRemainingHeader string @@ -52,35 +49,25 @@ type service struct { globalShadowMode bool } -func (this *service) reloadConfig(statsManager stats.Manager) { - defer func() { - if e := recover(); e != nil { - configError, ok := e.(config.RateLimitConfigError) - if !ok { - panic(e) - } - - this.stats.ConfigLoadError.Inc() - logger.Errorf("error loading new configuration from runtime: %s", configError.Error()) - } - }() - - files := []config.RateLimitConfigToLoad{} - snapshot := this.runtime.Snapshot() - for _, key := range snapshot.Keys() { - if this.runtimeWatchRoot && !strings.HasPrefix(key, "config.") { - continue +func (this *service) setConfig(updateEvent provider.ConfigUpdateEvent) { + newConfig, err := updateEvent.GetConfig() + if err != nil { + configError, ok := err.(config.RateLimitConfigError) + if !ok { + panic(err) } - files = append(files, config.RateLimitConfigToLoad{key, snapshot.Get(key)}) + this.stats.ConfigLoadError.Inc() + logger.Errorf("Error loading new configuration: %s", configError.Error()) + return } - rlSettings := settings.NewSettings() - newConfig := this.configLoader.Load(files, statsManager, rlSettings.MergeDomainConfigurations) this.stats.ConfigLoadSuccess.Inc() this.configLock.Lock() this.config = newConfig + + rlSettings := settings.NewSettings() this.globalShadowMode = rlSettings.GlobalShadowMode if rlSettings.RateLimitResponseHeadersEnabled { @@ -93,6 +80,7 @@ func (this *service) reloadConfig(statsManager stats.Manager) { this.customHeaderResetHeader = rlSettings.HeaderRatelimitReset } this.configLock.Unlock() + logger.Info("Successfully loaded new configuration") } type serviceError string @@ -312,32 +300,31 @@ func (this *service) GetCurrentConfig() (config.RateLimitConfig, bool) { return this.config, this.globalShadowMode } -func NewService(runtime loader.IFace, cache limiter.RateLimitCache, - configLoader config.RateLimitConfigLoader, statsManager stats.Manager, runtimeWatchRoot bool, clock utils.TimeSource, shadowMode bool) RateLimitServiceServer { +func NewService(cache limiter.RateLimitCache, configProvider 
provider.RateLimitConfigProvider, statsManager stats.Manager, + clock utils.TimeSource, shadowMode, forceStart bool) RateLimitServiceServer { newService := &service{ - runtime: runtime, - configLock: sync.RWMutex{}, - configLoader: configLoader, - config: nil, - runtimeUpdateEvent: make(chan int), - cache: cache, - stats: statsManager.NewServiceStats(), - runtimeWatchRoot: runtimeWatchRoot, - globalShadowMode: shadowMode, - customHeaderClock: clock, + configLock: sync.RWMutex{}, + configUpdateEvent: configProvider.ConfigUpdateEvent(), + config: nil, + cache: cache, + stats: statsManager.NewServiceStats(), + globalShadowMode: shadowMode, + customHeaderClock: clock, } - runtime.AddUpdateCallback(newService.runtimeUpdateEvent) + if !forceStart { + logger.Info("Waiting for initial ratelimit config update event") + newService.setConfig(<-newService.configUpdateEvent) + logger.Info("Successfully loaded the initial ratelimit configs") + } - newService.reloadConfig(statsManager) go func() { - // No exit right now. for { - logger.Debugf("waiting for runtime update") - <-newService.runtimeUpdateEvent - logger.Debugf("got runtime update and reloading config") - newService.reloadConfig(statsManager) + logger.Debug("Waiting for config update event") + updateEvent := <-newService.configUpdateEvent + logger.Debug("Setting config retrieved from config provider") + newService.setConfig(updateEvent) } }() diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 605aed0e..d4593126 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -21,7 +21,6 @@ import ( logger "github.com/sirupsen/logrus" - "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/src/redis" @@ -117,13 +116,12 @@ func (runner *Runner) Run() { runner.mu.Unlock() service := ratelimit.NewService( - srv.Runtime(), createLimiter(srv, s, localCache, runner.statsManager), - config.NewRateLimitConfigLoaderImpl(), + srv.Provider(), runner.statsManager, - s.RuntimeWatchRoot, utils.NewTimeSourceImpl(), s.GlobalShadowMode, + s.ForceStartWithoutInitialConfig, ) srv.AddDebugHttpEndpoint( diff --git a/src/settings/settings.go b/src/settings/settings.go index d0731511..eae203dc 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -46,6 +46,28 @@ type Settings struct { LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` LogFormat string `envconfig:"LOG_FORMAT" default:"text"` + // Rate limit configuration + // ConfigType is the method of configuring rate limits. Possible values: "FILE", "GRPC_XDS_SOTW". + ConfigType string `envconfig:"CONFIG_TYPE" default:"FILE"` + // ForceStartWithoutInitialConfig enables starting the server without waiting for an initial rate limit config update event + ForceStartWithoutInitialConfig bool `envconfig:"FORCE_START_WITHOUT_INITIAL_CONFIG" default:"false"` + + // xDS rate limit configuration + // ConfigGrpcXdsNodeId is the Node ID. 
The xDS server should set snapshots for this Node ID. + ConfigGrpcXdsNodeId string `envconfig:"CONFIG_GRPC_XDS_NODE_ID" default:"default"` + ConfigGrpcXdsNodeMetadata string `envconfig:"CONFIG_GRPC_XDS_NODE_METADATA" default:""` // e.g. "key1=val1,key2=val2" + ConfigGrpcXdsServerUrl string `envconfig:"CONFIG_GRPC_XDS_SERVER_URL" default:"localhost:18000"` + ConfigGrpcXdsServerConnectRetryInterval time.Duration `envconfig:"CONFIG_GRPC_XDS_SERVER_CONNECT_RETRY_INTERVAL" default:"3s"` + + // xDS config server TLS configurations + ConfigGrpcXdsTlsConfig *tls.Config + ConfigGrpcXdsServerUseTls bool `envconfig:"CONFIG_GRPC_XDS_SERVER_USE_TLS" default:"false"` + ConfigGrpcXdsClientTlsCert string `envconfig:"CONFIG_GRPC_XDS_CLIENT_TLS_CERT" default:""` + ConfigGrpcXdsClientTlsKey string `envconfig:"CONFIG_GRPC_XDS_CLIENT_TLS_KEY" default:""` + ConfigGrpcXdsServerTlsCACert string `envconfig:"CONFIG_GRPC_XDS_SERVER_TLS_CACERT" default:""` + // ConfigGrpcXdsServerTlsSAN is the ServerName used for hostname verification of the xDS management server's certificate + ConfigGrpcXdsServerTlsSAN string `envconfig:"CONFIG_GRPC_XDS_SERVER_TLS_SAN" default:""` + // Stats-related settings UseStatsd bool `envconfig:"USE_STATSD" default:"true"` StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` @@ -149,6 +171,7 @@ func NewSettings() Settings { // When we require TLS to connect to Redis, we check if we need to connect using the provided key-pair. RedisTlsConfig(s.RedisTls || s.RedisPerSecondTls)(&s) GrpcServerTlsConfig()(&s) + ConfigGrpcXdsServerTlsConfig()(&s) return s } @@ -178,6 +201,20 @@ func GrpcServerTlsConfig() Option { } } +func ConfigGrpcXdsServerTlsConfig() Option { + return func(s *Settings) { + if s.ConfigGrpcXdsServerUseTls { + configGrpcXdsServerTlsConfig := utils.TlsConfigFromFiles(s.ConfigGrpcXdsClientTlsCert, s.ConfigGrpcXdsClientTlsKey, s.ConfigGrpcXdsServerTlsCACert, utils.ServerCA) + if s.ConfigGrpcXdsServerTlsCACert != "" { + configGrpcXdsServerTlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + } else { + configGrpcXdsServerTlsConfig.ClientAuth = tls.NoClientCert + } + s.ConfigGrpcXdsTlsConfig = configGrpcXdsServerTlsConfig + } + } +} + func GrpcUnaryInterceptor(i grpc.UnaryServerInterceptor) Option { return func(s *Settings) { s.GrpcUnaryInterceptor = i diff --git a/test/common/xds_sotw.go b/test/common/xds_sotw.go new file mode 100644 index 00000000..1b214531 --- /dev/null +++ b/test/common/xds_sotw.go @@ -0,0 +1,70 @@ +package common + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/server/v3" + "google.golang.org/grpc" + + discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +type XdsServerConfig struct { + Port int + NodeId string +} + +type SetSnapshotFunc func(*cache.Snapshot) + +func StartXdsSotwServer(t *testing.T, config *XdsServerConfig, initSnapshot *cache.Snapshot) (SetSnapshotFunc, context.CancelFunc) { + t.Helper() + + ctx, cancel := context.WithCancel(context.Background()) + + snapCache := cache.NewSnapshotCache(true, cache.IDHash{}, nil) + if err := initSnapshot.Consistent(); err != nil { + t.Errorf("Error checking consistency in initial snapshot: %v", err) + } + + if err := snapCache.SetSnapshot(context.Background(), config.NodeId, initSnapshot); err != nil { + panic(err) + } + srv := server.NewServer(ctx, snapCache, nil) + + grpcServer := grpc.NewServer() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", config.Port)) + if err != nil { + t.Errorf("Error 
listening to port: %v: %v", config.Port, err) + } + discoverygrpc.RegisterAggregatedDiscoveryServiceServer(grpcServer, srv) + go func() { + if err = grpcServer.Serve(lis); err != nil { + t.Error(err) + } + }() + + // HACK: Wait for the server to come up. Make a hook that we can wait on. + WaitForTcpPort(context.Background(), config.Port, 1*time.Second) + + cancelFunc := func() { + cancel() + grpcServer.Stop() + } + return setSnapshotFunc(t, snapCache, config.NodeId), cancelFunc +} + +func setSnapshotFunc(t *testing.T, snapCache cache.SnapshotCache, nodeId string) SetSnapshotFunc { + return func(snapshot *cache.Snapshot) { + if err := snapshot.Consistent(); err != nil { + t.Errorf("snapshot inconsistency: %+v\n%+v", snapshot, err) + } + if err := snapCache.SetSnapshot(context.Background(), nodeId, snapshot); err != nil { + t.Errorf("snapshot error %q for %+v", err, snapshot) + } + } +} diff --git a/test/config/config_test.go b/test/config/config_test.go index be2ab934..9df082c6 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -21,7 +21,8 @@ func loadFile(path string) []config.RateLimitConfigToLoad { if err != nil { panic(err) } - return []config.RateLimitConfigToLoad{{path, string(contents)}} + configYaml := config.ConfigFileContentToYaml(path, string(contents)) + return []config.RateLimitConfigToLoad{{Name: path, ConfigYaml: configYaml}} } func TestBasicConfig(t *testing.T) { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index a835c508..74cfb8b1 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -95,6 +95,20 @@ func TestBasicConfig(t *testing.T) { }) } +func TestXdsProviderBasicConfig(t *testing.T) { + common.WithMultiRedis(t, []common.RedisConfig{ + {Port: 6383}, + {Port: 6380}, + }, func() { + _, cancel := startXdsSotwServer(t) + defer cancel() + t.Run("WithoutPerSecondRedis", testXdsProviderBasicConfig(false, 0)) + t.Run("WithPerSecondRedis", testXdsProviderBasicConfig(true, 0)) + t.Run("WithoutPerSecondRedisWithLocalCache", testXdsProviderBasicConfig(false, 1000)) + t.Run("WithPerSecondRedisWithLocalCache", testXdsProviderBasicConfig(true, 1000)) + }) +} + func TestBasicConfig_ExtraTags(t *testing.T) { common.WithMultiRedis(t, []common.RedisConfig{ {Port: 6383}, @@ -181,6 +195,17 @@ func TestBasicReloadConfig(t *testing.T) { }) } +func TestXdsProviderBasicConfigReload(t *testing.T) { + common.WithMultiRedis(t, []common.RedisConfig{ + {Port: 6383}, + }, func() { + setSnapshotFunc, cancel := startXdsSotwServer(t) + defer cancel() + + t.Run("ReloadConfigWithXdsServer", testXdsProviderBasicConfigReload(setSnapshotFunc, false, 0)) + }) +} + func makeSimpleMemcacheSettings(memcachePorts []int, localCacheSize int) settings.Settings { s := defaultSettings() var memcacheHostAndPort []string @@ -381,7 +406,7 @@ func testBasicConfigWithoutWatchRootWithRedisSentinel(perSecond bool, local_cach func testBasicConfigReload(perSecond bool, local_cache_size int, runtimeWatchRoot bool) func(*testing.T) { s := makeSimpleRedisSettings(6383, 6380, perSecond, local_cache_size) s.RuntimeWatchRoot = runtimeWatchRoot - return testConfigReload(s) + return testConfigReload(s, reloadNewConfigFile, restoreConfigFile) } func testBasicConfigReloadWithRedisCluster(perSecond bool, local_cache_size int, runtimeWatchRoot string) func(*testing.T) { @@ -395,7 +420,7 @@ func testBasicConfigReloadWithRedisCluster(perSecond bool, local_cache_size int, configRedisCluster(&s) - return testConfigReload(s) + return 
testConfigReload(s, reloadNewConfigFile, restoreConfigFile) } func testBasicConfigReloadWithRedisSentinel(perSecond bool, local_cache_size int, runtimeWatchRoot bool) func(*testing.T) { @@ -409,7 +434,7 @@ func testBasicConfigReloadWithRedisSentinel(perSecond bool, local_cache_size int s.RuntimeWatchRoot = runtimeWatchRoot - return testConfigReload(s) + return testConfigReload(s, reloadNewConfigFile, restoreConfigFile) } func getCacheKey(cacheKey string, enableLocalCache bool) string { @@ -671,7 +696,7 @@ func startTestRunner(t *testing.T, s settings.Settings) *runner.Runner { return &runner } -func testConfigReload(s settings.Settings) func(*testing.T) { +func testConfigReload(s settings.Settings, reloadConfFunc, restoreConfFunc func()) func(*testing.T) { return func(t *testing.T) { enable_local_cache := s.LocalCacheSizeInBytes > 0 runner := startTestRunner(t, s) @@ -698,26 +723,7 @@ func testConfigReload(s settings.Settings) func(*testing.T) { runner.GetStatsStore().Flush() loadCountBefore := runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() - // Copy a new file to config folder to test config reload functionality - in, err := os.Open("runtime/current/ratelimit/reload.yaml") - if err != nil { - panic(err) - } - defer in.Close() - out, err := os.Create("runtime/current/ratelimit/config/reload.yaml") - if err != nil { - panic(err) - } - defer out.Close() - _, err = io.Copy(out, in) - if err != nil { - panic(err) - } - err = out.Close() - if err != nil { - panic(err) - } - + reloadConfFunc() loadCountAfter, reloaded := waitForConfigReload(runner, loadCountBefore) assert.True(reloaded) @@ -739,11 +745,7 @@ func testConfigReload(s settings.Settings) func(*testing.T) { response) assert.NoError(err) - err = os.Remove("runtime/current/ratelimit/config/reload.yaml") - if err != nil { - panic(err) - } - + restoreConfFunc() // Removal of config files must trigger a reload loadCountBefore = loadCountAfter loadCountAfter, reloaded = waitForConfigReload(runner, loadCountBefore) @@ -752,6 +754,35 @@ func testConfigReload(s settings.Settings) func(*testing.T) { } } +func reloadNewConfigFile() { + // Copy a new file to config folder to test config reload functionality + in, err := os.Open("runtime/current/ratelimit/reload.yaml") + if err != nil { + panic(err) + } + defer in.Close() + out, err := os.Create("runtime/current/ratelimit/config/reload.yaml") + if err != nil { + panic(err) + } + defer out.Close() + _, err = io.Copy(out, in) + if err != nil { + panic(err) + } + err = out.Close() + if err != nil { + panic(err) + } +} + +func restoreConfigFile() { + err := os.Remove("runtime/current/ratelimit/config/reload.yaml") + if err != nil { + panic(err) + } +} + func waitForConfigReload(runner *runner.Runner, loadCountBefore uint64) (uint64, bool) { // Need to wait for config reload to take place and new descriptors to be loaded. // Shouldn't take more than 5 seconds but wait 120 at most just to be safe. 
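For the xDS-backed variants that follow, the reload and restore hooks are implemented by publishing a new snapshot to the test management server; the SotW snapshot cache only pushes an update to subscribed clients when the snapshot version changes. A minimal sketch using the `StartXdsSotwServer` helper above (the port, node ID, and resource contents are illustrative):

```go
package xdsreload_test

import (
	"testing"

	"github.com/envoyproxy/go-control-plane/pkg/cache/types"
	"github.com/envoyproxy/go-control-plane/pkg/cache/v3"
	"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
	rls_config "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3"

	"github.com/envoyproxy/ratelimit/test/common"
)

func TestReloadSketch(t *testing.T) {
	resources := map[resource.Type][]types.Resource{
		resource.RateLimitConfigType: {
			&rls_config.RateLimitConfig{Name: "basic", Domain: "basic"},
		},
	}
	initial, err := cache.NewSnapshot("1", resources)
	if err != nil {
		t.Fatal(err)
	}
	setSnapshot, cancel := common.StartXdsSotwServer(t, &common.XdsServerConfig{Port: 18000, NodeId: "test-node"}, initial)
	defer cancel()

	// Bumping the version is what triggers a push to subscribed clients;
	// re-setting a snapshot with the same version would be a no-op.
	updated, err := cache.NewSnapshot("2", resources)
	if err != nil {
		t.Fatal(err)
	}
	setSnapshot(updated)
}
```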
diff --git a/test/integration/xds_sotw_integration_test.go b/test/integration/xds_sotw_integration_test.go new file mode 100644 index 00000000..01f43583 --- /dev/null +++ b/test/integration/xds_sotw_integration_test.go @@ -0,0 +1,182 @@ +//go:build integration + +package integration_test + +import ( + "context" + "testing" + + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/test/common" + + rls_config "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3" +) + +func testXdsProviderBasicConfig(perSecond bool, local_cache_size int) func(*testing.T) { + s := makeSimpleRedisSettings(6383, 6380, perSecond, local_cache_size) + configXdsProvider(&s) + + return testBasicBaseConfig(s) +} + +func testXdsProviderBasicConfigReload(setSnapshotFunc common.SetSnapshotFunc, perSecond bool, local_cache_size int) func(*testing.T) { + s := makeSimpleRedisSettings(6383, 6380, perSecond, local_cache_size) + configXdsProvider(&s) + return testConfigReload(s, newConfigWithXdsConfigProvider(setSnapshotFunc), restoreConfigWithXdsConfigProvider(setSnapshotFunc)) +} + +func configXdsProvider(s *settings.Settings) { + s.ConfigType = "GRPC_XDS_SOTW" + s.ConfigGrpcXdsNodeId = "init-test-node" + s.ConfigGrpcXdsServerUrl = "localhost:18000" +} + +func startXdsSotwServer(t *testing.T) (common.SetSnapshotFunc, context.CancelFunc) { + conf := &common.XdsServerConfig{Port: 18000, NodeId: "init-test-node"} + intSnapshot, err := cache.NewSnapshot("1", initialXdsBasicConfig()) + if err != nil { + panic(err) + } + return common.StartXdsSotwServer(t, conf, intSnapshot) +} + +func initialXdsBasicConfig() map[resource.Type][]types.Resource { + return map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "basic", + Domain: "basic", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "key1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 50, + }, + }, + { + Key: "key1_local", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 50, + }, + }, + { + Key: "one_per_minute", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 1, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "another", + Domain: "another", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "key2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 20, + }, + }, + { + Key: "key3", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_HOUR, + RequestsPerUnit: 10, + }, + }, + { + Key: "key2_local", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 20, + }, + }, + { + Key: "key3_local", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_HOUR, + RequestsPerUnit: 10, + }, + }, + { + Key: "key4", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_DAY, + RequestsPerUnit: 20, + }, + }, + { + Key: "key4_local", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_DAY, + RequestsPerUnit: 20, + }, + }, + }, + }, + }, + } +} + +func newConfigWithXdsConfigProvider(setSnapshotFunc common.SetSnapshotFunc) func() { + initConfig := 
initialXdsBasicConfig() + rlsConf := initConfig[resource.RateLimitConfigType] + newRlsConf := append(rlsConf, &rls_config.RateLimitConfig{ + Name: "reload", + Domain: "reload", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "key1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 50, + }, + }, + { + Key: "block", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_SECOND, + RequestsPerUnit: 0, + }, + }, + { + Key: "one_per_minute", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 1, + }, + }, + }, + }) + + newConfig := map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: newRlsConf, + } + newSnapshot, err := cache.NewSnapshot("2", newConfig) + if err != nil { + panic(err) + } + + return func() { + setSnapshotFunc(newSnapshot) + } +} + +func restoreConfigWithXdsConfigProvider(setSnapshotFunc common.SetSnapshotFunc) func() { + newSnapshot, err := cache.NewSnapshot("3", initialXdsBasicConfig()) + if err != nil { + panic(err) + } + + return func() { + setSnapshotFunc(newSnapshot) + } +} diff --git a/test/mocks/provider/provider.go b/test/mocks/provider/provider.go new file mode 100644 index 00000000..376f8379 --- /dev/null +++ b/test/mocks/provider/provider.go @@ -0,0 +1,101 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: /Users/renuka/git/ratelimit/src/provider/provider.go + +// Package mock_provider is a generated GoMock package. +package mock_provider + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + config "github.com/envoyproxy/ratelimit/src/config" + provider "github.com/envoyproxy/ratelimit/src/provider" +) + +// MockRateLimitConfigProvider is a mock of RateLimitConfigProvider interface +type MockRateLimitConfigProvider struct { + ctrl *gomock.Controller + recorder *MockRateLimitConfigProviderMockRecorder +} + +// MockRateLimitConfigProviderMockRecorder is the mock recorder for MockRateLimitConfigProvider +type MockRateLimitConfigProviderMockRecorder struct { + mock *MockRateLimitConfigProvider +} + +// NewMockRateLimitConfigProvider creates a new mock instance +func NewMockRateLimitConfigProvider(ctrl *gomock.Controller) *MockRateLimitConfigProvider { + mock := &MockRateLimitConfigProvider{ctrl: ctrl} + mock.recorder = &MockRateLimitConfigProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRateLimitConfigProvider) EXPECT() *MockRateLimitConfigProviderMockRecorder { + return m.recorder +} + +// ConfigUpdateEvent mocks base method +func (m *MockRateLimitConfigProvider) ConfigUpdateEvent() <-chan provider.ConfigUpdateEvent { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ConfigUpdateEvent") + ret0, _ := ret[0].(<-chan provider.ConfigUpdateEvent) + return ret0 +} + +// ConfigUpdateEvent indicates an expected call of ConfigUpdateEvent +func (mr *MockRateLimitConfigProviderMockRecorder) ConfigUpdateEvent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConfigUpdateEvent", reflect.TypeOf((*MockRateLimitConfigProvider)(nil).ConfigUpdateEvent)) +} + +// Stop mocks base method +func (m *MockRateLimitConfigProvider) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop +func (mr *MockRateLimitConfigProviderMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"Stop", reflect.TypeOf((*MockRateLimitConfigProvider)(nil).Stop)) +} + +// MockConfigUpdateEvent is a mock of ConfigUpdateEvent interface +type MockConfigUpdateEvent struct { + ctrl *gomock.Controller + recorder *MockConfigUpdateEventMockRecorder +} + +// MockConfigUpdateEventMockRecorder is the mock recorder for MockConfigUpdateEvent +type MockConfigUpdateEventMockRecorder struct { + mock *MockConfigUpdateEvent +} + +// NewMockConfigUpdateEvent creates a new mock instance +func NewMockConfigUpdateEvent(ctrl *gomock.Controller) *MockConfigUpdateEvent { + mock := &MockConfigUpdateEvent{ctrl: ctrl} + mock.recorder = &MockConfigUpdateEventMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockConfigUpdateEvent) EXPECT() *MockConfigUpdateEventMockRecorder { + return m.recorder +} + +// GetConfig mocks base method +func (m *MockConfigUpdateEvent) GetConfig() (config.RateLimitConfig, any) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConfig") + ret0, _ := ret[0].(config.RateLimitConfig) + ret1, _ := ret[1].(any) + return ret0, ret1 +} + +// GetConfig indicates an expected call of GetConfig +func (mr *MockConfigUpdateEventMockRecorder) GetConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfig", reflect.TypeOf((*MockConfigUpdateEvent)(nil).GetConfig)) +} diff --git a/test/provider/xds_grpc_sotw_provider_test.go b/test/provider/xds_grpc_sotw_provider_test.go new file mode 100644 index 00000000..49a124bd --- /dev/null +++ b/test/provider/xds_grpc_sotw_provider_test.go @@ -0,0 +1,330 @@ +package provider_test + +import ( + "fmt" + "os" + "strings" + "testing" + + gostats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" + + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + + "github.com/envoyproxy/ratelimit/src/provider" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/test/common" + "github.com/envoyproxy/ratelimit/test/mocks/stats" + + rls_config "github.com/envoyproxy/go-control-plane/ratelimit/config/ratelimit/v3" +) + +const ( + xdsNodeId = "test-node" + xdsPort = 18001 +) + +func TestXdsProvider(t *testing.T) { + intSnapshot, _ := cache.NewSnapshot("1", + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 3, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc, cancel := common.StartXdsSotwServer(t, &common.XdsServerConfig{Port: xdsPort, NodeId: xdsNodeId}, intSnapshot) + defer cancel() + + s := settings.Settings{ + ConfigType: "GRPC_XDS_SOTW", + ConfigGrpcXdsNodeId: xdsNodeId, + ConfigGrpcXdsServerUrl: fmt.Sprintf("localhost:%d", xdsPort), + } + + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + statsManager := stats.NewMockStatManager(statsStore) + p := provider.NewXdsGrpcSotwProvider(s, statsManager) + defer p.Stop() + providerEventChan := p.ConfigUpdateEvent() + + snapVersion := 1 + t.Run("Test initial xDS config", testInitialXdsConfig(&snapVersion, setSnapshotFunc, providerEventChan)) + t.Run("Test new (after initial) xDS config update", testNewXdsConfigUpdate(&snapVersion, setSnapshotFunc, 
providerEventChan)) + t.Run("Test multi domain xDS config update", testMultiDomainXdsConfigUpdate(&snapVersion, setSnapshotFunc, providerEventChan)) + t.Run("Test limits with deeper xDS config update", testDeeperLimitsXdsConfigUpdate(&snapVersion, setSnapshotFunc, providerEventChan)) + + err := os.Setenv("MERGE_DOMAIN_CONFIG", "true") + defer os.Unsetenv("MERGE_DOMAIN_CONFIG") + if err != nil { + t.Error("Error setting 'MERGE_DOMAIN_CONFIG' environment variable", err) + } + t.Run("Test same domain multiple times xDS config update", testSameDomainMultipleXdsConfigUpdate(setSnapshotFunc, providerEventChan)) +} + +func testInitialXdsConfig(snapVersion *int, setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + *snapVersion += 1 + return func(t *testing.T) { + assert := assert.New(t) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.Equal("foo.k1_v1: unit=MINUTE requests_per_unit=3, shadow_mode: false\n", config.Dump()) + } +} + +func testNewXdsConfigUpdate(snapVersion *int, setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + *snapVersion += 1 + return func(t *testing.T) { + assert := assert.New(t) + + snapshot, _ := cache.NewSnapshot(fmt.Sprint(*snapVersion), + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k2", + Value: "v2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 5, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc(snapshot) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.Equal("foo.k2_v2: unit=MINUTE requests_per_unit=5, shadow_mode: false\n", config.Dump()) + } +} + +func testMultiDomainXdsConfigUpdate(snapVersion *int, setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + *snapVersion += 1 + return func(t *testing.T) { + assert := assert.New(t) + + snapshot, _ := cache.NewSnapshot(fmt.Sprint(*snapVersion), + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 10, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "bar", + Domain: "bar", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 100, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc(snapshot) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.ElementsMatch([]string{ + "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false", + "bar.k1_v1: unit=MINUTE requests_per_unit=100, shadow_mode: false", + }, strings.Split(strings.TrimSuffix(config.Dump(), "\n"), "\n")) + } +} + +func testDeeperLimitsXdsConfigUpdate(snapVersion *int, setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + *snapVersion += 1 + return 
func(t *testing.T) { + assert := assert.New(t) + + snapshot, _ := cache.NewSnapshot(fmt.Sprint(*snapVersion), + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 10, + }, + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k2", + RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + }, + { + Key: "k2", + Value: "v2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_HOUR, + RequestsPerUnit: 15, + }, + }, + }, + }, + { + Key: "j1", + Value: "v2", + RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "j2", + RateLimit: &rls_config.RateLimitPolicy{ + Unlimited: true, + }, + }, + { + Key: "j2", + Value: "v2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_DAY, + RequestsPerUnit: 15, + }, + ShadowMode: true, + }, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "bar", + Domain: "bar", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 100, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc(snapshot) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.ElementsMatch([]string{ + "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false", + "foo.k1_v1.k2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false", + "foo.k1_v1.k2_v2: unit=HOUR requests_per_unit=15, shadow_mode: false", + "foo.j1_v2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false", + "foo.j1_v2.j2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false", + "foo.j1_v2.j2_v2: unit=DAY requests_per_unit=15, shadow_mode: true", + "bar.k1_v1: unit=MINUTE requests_per_unit=100, shadow_mode: false", + }, strings.Split(strings.TrimSuffix(config.Dump(), "\n"), "\n")) + } +} + +func testSameDomainMultipleXdsConfigUpdate(setSnapshotFunc common.SetSnapshotFunc, providerEventChan <-chan provider.ConfigUpdateEvent) func(t *testing.T) { + return func(t *testing.T) { + assert := assert.New(t) + + snapshot, _ := cache.NewSnapshot("3", + map[resource.Type][]types.Resource{ + resource.RateLimitConfigType: { + &rls_config.RateLimitConfig{ + Name: "foo-1", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v1", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 10, + }, + }, + }, + }, + &rls_config.RateLimitConfig{ + Name: "foo-2", + Domain: "foo", + Descriptors: []*rls_config.RateLimitDescriptor{ + { + Key: "k1", + Value: "v2", + RateLimit: &rls_config.RateLimitPolicy{ + Unit: rls_config.RateLimitUnit_MINUTE, + RequestsPerUnit: 100, + }, + }, + }, + }, + }, + }, + ) + setSnapshotFunc(snapshot) + + configEvent := <-providerEventChan + assert.NotNil(configEvent) + + config, err := configEvent.GetConfig() + assert.Nil(err) + assert.ElementsMatch([]string{ + "foo.k1_v2: unit=MINUTE requests_per_unit=100, shadow_mode: false", + "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false", + }, strings.Split(strings.TrimSuffix(config.Dump(), "\n"), "\n")) + } +} diff --git a/test/service/ratelimit_test.go 
b/test/service/ratelimit_test.go index 64c95568..e90bbed0 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -6,6 +6,7 @@ import ( "sync" "testing" + "github.com/envoyproxy/ratelimit/src/provider" "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/utils" @@ -25,8 +26,7 @@ import ( "github.com/envoyproxy/ratelimit/test/common" mock_config "github.com/envoyproxy/ratelimit/test/mocks/config" mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" - mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" - mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" + mock_provider "github.com/envoyproxy/ratelimit/test/mocks/provider" mock_stats "github.com/envoyproxy/ratelimit/test/mocks/stats" ) @@ -60,12 +60,11 @@ func newBarrier() barrier { type rateLimitServiceTestSuite struct { assert *assert.Assertions controller *gomock.Controller - runtime *mock_loader.MockIFace - snapshot *mock_snapshot.MockIFace cache *mock_limiter.MockRateLimitCache - configLoader *mock_config.MockRateLimitConfigLoader + configProvider *mock_provider.MockRateLimitConfigProvider + configUpdateEventChan chan provider.ConfigUpdateEvent + configUpdateEvent *mock_provider.MockConfigUpdateEvent config *mock_config.MockRateLimitConfig - runtimeUpdateCallback chan<- int statsManager stats.Manager statStore gostats.Store mockClock utils.TimeSource @@ -81,10 +80,11 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret := rateLimitServiceTestSuite{} ret.assert = assert.New(t) ret.controller = gomock.NewController(t) - ret.runtime = mock_loader.NewMockIFace(ret.controller) - ret.snapshot = mock_snapshot.NewMockIFace(ret.controller) ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller) - ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) + ret.configProvider = mock_provider.NewMockRateLimitConfigProvider(ret.controller) + ret.configUpdateEventChan = make(chan provider.ConfigUpdateEvent) + ret.configUpdateEvent = mock_provider.NewMockConfigUpdateEvent(ret.controller) + // ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) ret.config = mock_config.NewMockRateLimitConfig(ret.controller) ret.statStore = gostats.NewStore(gostats.NewNullSink(), false) ret.statsManager = mock_stats.NewMockStatManager(ret.statStore) @@ -92,21 +92,19 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { } func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitServiceServer { - this.runtime.EXPECT().AddUpdateCallback(gomock.Any()).Do( - func(callback chan<- int) { - this.runtimeUpdateCallback = callback - }) - this.runtime.EXPECT().Snapshot().Return(this.snapshot).MinTimes(1) - this.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1) - this.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) - this.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, - gomock.Any(), gomock.Any()).Return(this.config) - - // reset exporter before using + barrier := newBarrier() + this.configProvider.EXPECT().ConfigUpdateEvent().Return(this.configUpdateEventChan).Times(1) + this.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return this.config, nil + }) + go func() { this.configUpdateEventChan <- this.configUpdateEvent }() // initial config update from provider + testSpanExporter.Reset() - 
return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statsManager, true, MockClock{now: int64(2222)}, false) + svc := ratelimit.NewService(this.cache, this.configProvider, this.statsManager, MockClock{now: int64(2222)}, false, false) + barrier.wait() // wait for initial config load + return svc +} // once a ratelimit service is initiated, the package always fetches a default tracer from otel runtime and it can't be changed until a new round of tests is run. It is necessary to keep a package-level exporter in this test package in order to correctly run the tests. @@ -116,6 +114,7 @@ func TestService(test *testing.T) { t := commonSetup(test) defer t.controller.Finish() service := t.setupBasicService() + barrier := newBarrier() // First request, config should be loaded. request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) @@ -133,12 +132,12 @@ func TestService(test *testing.T) { response) t.assert.Nil(err) - // Force a config reload. - barrier := newBarrier() - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) - t.runtimeUpdateCallback <- 1 + // Force a config reload - config event from config provider. + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Different request. @@ -169,13 +168,11 @@ func TestService(test *testing.T) { t.assert.Nil(err) // Config load failure. - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { - defer barrier.signal() - panic(config.RateLimitConfigError("load error")) - }) - t.runtimeUpdateCallback <- 1 + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return nil, config.RateLimitConfigError("load error") + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Config should still be valid. Also make sure order does not affect results. @@ -222,10 +219,11 @@ func TestServiceGlobalShadowMode(test *testing.T) { // Force a config reload. barrier := newBarrier() - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) - t.runtimeUpdateCallback <- 1 + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Make a request. @@ -356,10 +354,11 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) { // Config reload. 
barrier := newBarrier() - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) - t.runtimeUpdateCallback <- 1 + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Make request @@ -408,10 +407,11 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { // Config reload. barrier := newBarrier() - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { barrier.signal() }).Return(t.config) - t.runtimeUpdateCallback <- 1 + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent barrier.wait() // Make request @@ -495,17 +495,15 @@ func TestInitialLoadError(test *testing.T) { t := commonSetup(test) defer t.controller.Finish() - t.runtime.EXPECT().AddUpdateCallback(gomock.Any()).Do( - func(callback chan<- int) { t.runtimeUpdateCallback = callback }) - t.runtime.EXPECT().Snapshot().Return(t.snapshot).MinTimes(1) - t.snapshot.EXPECT().Keys().Return([]string{"foo", "config.basic_config"}).MinTimes(1) - t.snapshot.EXPECT().Get("config.basic_config").Return("fake_yaml").MinTimes(1) - t.configLoader.EXPECT().Load( - []config.RateLimitConfigToLoad{{Name: "config.basic_config", FileBytes: "fake_yaml"}}, gomock.Any(), gomock.Any()).Do( - func([]config.RateLimitConfigToLoad, stats.Manager, bool) { - panic(config.RateLimitConfigError("load error")) - }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statsManager, true, t.mockClock, false) + t.configProvider.EXPECT().ConfigUpdateEvent().Return(t.configUpdateEventChan).Times(1) + barrier := newBarrier() + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return nil, config.RateLimitConfigError("load error") + }) + go func() { t.configUpdateEventChan <- t.configUpdateEvent }() // initial config update from provider + service := ratelimit.NewService(t.cache, t.configProvider, t.statsManager, t.mockClock, false, false) + barrier.wait() request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.ShouldRateLimit(context.Background(), request) From 4c08885633e6ff5c142e45055b14d0687165d2fa Mon Sep 17 00:00:00 2001 From: Peter Leng Date: Thu, 2 Mar 2023 08:10:05 -0800 Subject: [PATCH 055/181] Allow override of runtime app directory (#397) Signed-off-by: yleng --- README.md | 9 +++++---- src/provider/file_provider.go | 4 ++-- src/settings/settings.go | 1 + test/integration/integration_test.go | 1 + 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index c114de1a..9366744d 100644 --- a/README.md +++ b/README.md @@ -116,7 +116,7 @@ Support for [v2 rls proto](https://github.com/envoyproxy/data-plane-api/blob/mas ``` - To run the server locally using some sensible default settings you can do this (this will setup the server to read the configuration files from the path you specify): ```bash - USE_STATSD=false LOG_LEVEL=debug REDIS_SOCKET_TYPE=tcp 
REDIS_URL=localhost:6379 RUNTIME_ROOT=/home/user/src/runtime/data RUNTIME_SUBDIRECTORY=ratelimit + USE_STATSD=false LOG_LEVEL=debug REDIS_SOCKET_TYPE=tcp REDIS_URL=localhost:6379 RUNTIME_ROOT=/home/user/src/runtime/data RUNTIME_SUBDIRECTORY=ratelimit RUNTIME_APPDIRECTORY=config ``` ## Docker-compose setup @@ -593,19 +593,20 @@ package with the following environment variables: ``` RUNTIME_ROOT default:"/srv/runtime_data/current" RUNTIME_SUBDIRECTORY +RUNTIME_APPDIRECTORY default:"config" RUNTIME_IGNOREDOTFILES default:"false" ``` -**Configuration files are loaded from RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/\*.yaml** +**Configuration files are loaded from RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/RUNTIME_APPDIRECTORY/\*.yaml** There are two methods for triggering a configuration reload: 1. Symlink RUNTIME_ROOT to a different directory. -2. Update the contents inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/` directly. +2. Update the contents inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/RUNTIME_APPDIRECTORY/` directly. The former is the default behavior. To use the latter method, set the `RUNTIME_WATCH_ROOT` environment variable to `false`. -The following filesystem operations on configuration files inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/` will force a reload of all config files: +The following filesystem operations on configuration files inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/RUNTIME_APPDIRECTORY/` will force a reload of all config files: - Write - Create diff --git a/src/provider/file_provider.go b/src/provider/file_provider.go index 07bfa11a..a1d2c8c7 100644 --- a/src/provider/file_provider.go +++ b/src/provider/file_provider.go @@ -55,7 +55,7 @@ func (p *FileProvider) sendEvent() { files := []config.RateLimitConfigToLoad{} snapshot := p.runtime.Snapshot() for _, key := range snapshot.Keys() { - if p.runtimeWatchRoot && !strings.HasPrefix(key, "config.") { + if p.runtimeWatchRoot && !strings.HasPrefix(key, p.settings.RuntimeAppDirectory+".") { continue } @@ -91,7 +91,7 @@ func (p *FileProvider) setupRuntime() { p.runtime, err = loader.New2( filepath.Join(p.settings.RuntimePath, p.settings.RuntimeSubdirectory), - "config", + p.settings.RuntimeAppDirectory, p.rootStore.ScopeWithTags("runtime", p.settings.ExtraTags), directoryRefresher, loaderOpts...) diff --git a/src/settings/settings.go b/src/settings/settings.go index eae203dc..b97137ff 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -77,6 +77,7 @@ type Settings struct { // Settings for rate limit configuration RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` RuntimeSubdirectory string `envconfig:"RUNTIME_SUBDIRECTORY"` + RuntimeAppDirectory string `envconfig:"RUNTIME_APPDIRECTORY" default:"config"` RuntimeIgnoreDotFiles bool `envconfig:"RUNTIME_IGNOREDOTFILES" default:"false"` RuntimeWatchRoot bool `envconfig:"RUNTIME_WATCH_ROOT" default:"true"` diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 74cfb8b1..ad9a7373 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -48,6 +48,7 @@ func defaultSettings() settings.Settings { // Set some convenient defaults for all integration tests. 
s.RuntimePath = "runtime/current" s.RuntimeSubdirectory = "ratelimit" + s.RuntimeAppDirectory = "config" s.RedisPerSecondSocketType = "tcp" s.RedisSocketType = "tcp" s.DebugPort = 8084 From 4faac891fb9b86ad0c11388af03a93097e615b20 Mon Sep 17 00:00:00 2001 From: Peter Leng Date: Wed, 22 Mar 2023 08:05:20 -0700 Subject: [PATCH 056/181] Update health-checks to be based on individual component health and add component for checking if at least one config is loaded. (#398) Signed-off-by: yleng --- README.md | 27 ++++++ src/config/config.go | 3 + src/config/config_impl.go | 4 + src/redis/driver_impl.go | 10 +- src/server/health.go | 97 +++++++++++++++++--- src/server/server.go | 8 +- src/server/server_impl.go | 10 +- src/service/ratelimit.go | 23 ++++- src/service_cmd/runner/runner.go | 2 + src/settings/settings.go | 3 + test/config/config_test.go | 1 + test/mocks/config/config.go | 14 +++ test/server/health_test.go | 152 ++++++++++++++++++++++++++++++- test/service/ratelimit_test.go | 100 +++++++++++++++++++- 14 files changed, 418 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index 9366744d..ae7880a5 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,8 @@ - [xDS Management Server Based Configuration Loading](#xds-management-server-based-configuration-loading) - [Log Format](#log-format) - [GRPC Keepalive](#grpc-keepalive) + - [Health-check](#health-check) + - [Health-check configurations](#health-check-configurations) - [Request Fields](#request-fields) - [GRPC Client](#grpc-client) - [Commandline flags](#commandline-flags) @@ -686,6 +688,31 @@ The behavior can be fixed by configuring the following env variables for the rat - `GRPC_MAX_CONNECTION_AGE`: a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. - `GRPC_MAX_CONNECTION_AGE_GRACE`: an additive period after MaxConnectionAge after which the connection will be forcibly closed. +## Health-check + +Health check status is determined internally by individual components. +Currently, three components determine the overall health status of the rate limit service. +Each individual component must be healthy for the overall status to report healthy. +Some components may be turned OFF via configuration, so overall health is not affected by that component's health status. + +- Redis health (Turned ON. Defaults to healthy) +- Configuration status (Turned OFF unless configured to be ON via `HEALTHY_WITH_AT_LEAST_ONE_CONFIG_LOADED`; see the section below. Defaults to unhealthy) + - If the environment variable is enabled, this component starts in an unhealthy state and becomes healthy when at least one config is loaded. If we later fail to load any configs, it goes unhealthy again. +- Sigterm (Turned ON. Defaults to healthy) + - Turns unhealthy when a SIGTERM signal is received. + All components need to be healthy for the overall health to be healthy. + +### Health-check configurations + +The health check can be configured to verify that rate-limit configurations are loaded using the following environment variable. + +``` +HEALTHY_WITH_AT_LEAST_ONE_CONFIG_LOADED default:"false" +``` + +If `HEALTHY_WITH_AT_LEAST_ONE_CONFIG_LOADED` is enabled, the health check starts as unhealthy and becomes healthy once +at least one domain is loaded from the config. If the loaded config later becomes empty, it turns unhealthy again. 
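Since the checker registers with the standard gRPC health service, the overall status can also be probed over gRPC. A minimal Go client sketch; the address and the service name `ratelimit` are deployment-specific assumptions:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Assumed gRPC address; use your deployment's port.
	conn, err := grpc.Dial("localhost:8081", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// SERVING is returned only while every tracked component is healthy.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: "ratelimit"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.GetStatus())
}
```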
+ # Request Fields For information on the fields of a Ratelimit gRPC request please read the information diff --git a/src/config/config.go b/src/config/config.go index 86f941ef..ca24d1e2 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -38,6 +38,9 @@ type RateLimitConfig interface { // @param descriptor supplies the descriptor to look up. // @return a rate limit to apply or nil if no rate limit is configured for the descriptor. GetLimit(ctx context.Context, domain string, descriptor *pb_struct.RateLimitDescriptor) *RateLimit + + // Check if the domains is empty which corresponds to no config loaded. + IsEmptyDomains() bool } // Information for a config file to load into the aggregate config. diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 2c9dcbcb..47ea3633 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -336,6 +336,10 @@ func (this *rateLimitConfigImpl) GetLimit( return rateLimit } +func (this *rateLimitConfigImpl) IsEmptyDomains() bool { + return len(this.domains) == 0 +} + func descriptorKey(domain string, descriptor *pb_struct.RateLimitDescriptor) string { rateLimitKey := "" for _, entry := range descriptor.Entries { diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 9fac55b5..436458ca 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -36,7 +36,10 @@ func poolTrace(ps *poolStats, healthCheckActiveConnection bool, srv server.Serve ps.connectionTotal.Add(1) ps.connectionActive.Add(1) if healthCheckActiveConnection && srv != nil { - srv.HealthCheckOK() + err := srv.HealthChecker().Ok(server.RedisHealthComponentName) + if err != nil { + logger.Errorf("Unable to update health status: %s", err) + } } } else { fmt.Println("creating redis connection error :", newConn.Err) @@ -46,7 +49,10 @@ func poolTrace(ps *poolStats, healthCheckActiveConnection bool, srv server.Serve ps.connectionActive.Sub(1) ps.connectionClose.Add(1) if healthCheckActiveConnection && srv != nil && ps.connectionActive.Value() == 0 { - srv.HealthCheckFail() + err := srv.HealthChecker().Fail(server.RedisHealthComponentName) + if err != nil { + logger.Errorf("Unable to update health status: %s", err) + } } }, } diff --git a/src/server/health.go b/src/server/health.go index d2eb2b76..9af7d2dc 100644 --- a/src/server/health.go +++ b/src/server/health.go @@ -1,37 +1,78 @@ package server import ( + "errors" + "fmt" "net/http" "os" "os/signal" + "sync" "sync/atomic" "syscall" + logger "github.com/sirupsen/logrus" + "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" ) type HealthChecker struct { - grpc *health.Server - ok uint32 - name string + sync.Mutex + grpc *health.Server + healthMap map[string]bool + ok uint32 + name string +} + +const ( + ConfigHealthComponentName = "config" + RedisHealthComponentName = "redis" + SigtermComponentName = "sigterm" +) + +func areAllComponentsHealthy(healthMap map[string]bool) bool { + allComponentsHealthy := true + for _, value := range healthMap { + if value == false { + allComponentsHealthy = false + break + } + } + return allComponentsHealthy } -func NewHealthChecker(grpcHealthServer *health.Server, name string) *HealthChecker { +// NewHealthChecker +// Only set the overall health to be Ok if all individual components are healthy. 
+func NewHealthChecker(grpcHealthServer *health.Server, name string, healthyWithAtLeastOneConfigLoad bool) *HealthChecker { ret := &HealthChecker{} - ret.ok = 1 ret.name = name + ret.healthMap = make(map[string]bool) + // Store health states of components into map + ret.healthMap[RedisHealthComponentName] = true + if healthyWithAtLeastOneConfigLoad { + // config starts in failed state since we need at least one config loaded to be healthy + ret.healthMap[ConfigHealthComponentName] = false + } + // True indicates we have not received sigterm + ret.healthMap[SigtermComponentName] = true + ret.grpc = grpcHealthServer - ret.grpc.SetServingStatus(ret.name, healthpb.HealthCheckResponse_SERVING) + + if areAllComponentsHealthy(ret.healthMap) { + ret.grpc.SetServingStatus(ret.name, healthpb.HealthCheckResponse_SERVING) + ret.ok = 1 + } else { + ret.grpc.SetServingStatus(ret.name, healthpb.HealthCheckResponse_NOT_SERVING) + ret.ok = 0 + } sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, syscall.SIGTERM) go func() { <-sigterm - atomic.StoreUint32(&ret.ok, 0) - ret.grpc.SetServingStatus(ret.name, healthpb.HealthCheckResponse_NOT_SERVING) + _ = ret.Fail(SigtermComponentName) }() return ret @@ -46,14 +87,42 @@ func (hc *HealthChecker) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } -func (hc *HealthChecker) Fail() { - atomic.StoreUint32(&hc.ok, 0) - hc.grpc.SetServingStatus(hc.name, healthpb.HealthCheckResponse_NOT_SERVING) +func (hc *HealthChecker) Fail(componentName string) error { + hc.Lock() + defer hc.Unlock() + if _, ok := hc.healthMap[componentName]; ok { + // Set component to be unhealthy + hc.healthMap[componentName] = false + atomic.StoreUint32(&hc.ok, 0) + hc.grpc.SetServingStatus(hc.name, healthpb.HealthCheckResponse_NOT_SERVING) + } else { + errorText := fmt.Sprintf("Invalid component: %s", componentName) + logger.Errorf(errorText) + return errors.New(errorText) + } + return nil } -func (hc *HealthChecker) Ok() { - atomic.StoreUint32(&hc.ok, 1) - hc.grpc.SetServingStatus(hc.name, healthpb.HealthCheckResponse_SERVING) +func (hc *HealthChecker) Ok(componentName string) error { + hc.Lock() + defer hc.Unlock() + + if _, ok := hc.healthMap[componentName]; ok { + // Set component to be healthy + hc.healthMap[componentName] = true + allComponentsHealthy := areAllComponentsHealthy(hc.healthMap) + + if allComponentsHealthy { + atomic.StoreUint32(&hc.ok, 1) + hc.grpc.SetServingStatus(hc.name, healthpb.HealthCheckResponse_SERVING) + } + } else { + errorText := fmt.Sprintf("Invalid component: %s", componentName) + logger.Errorf(errorText) + return errors.New(errorText) + } + + return nil } func (hc *HealthChecker) Server() *health.Server { diff --git a/src/server/server.go b/src/server/server.go index aa812f8c..7202fa2f 100644 --- a/src/server/server.go +++ b/src/server/server.go @@ -35,6 +35,11 @@ type Server interface { */ GrpcServer() *grpc.Server + /** + * Returns the health checker for the server. + */ + HealthChecker() *HealthChecker + /** * Returns the configuration provider for the server. */ @@ -44,7 +49,4 @@ type Server interface { * Stops serving the grpc port (for integration testing). 
*/ Stop() - - HealthCheckFail() - HealthCheckOK() } diff --git a/src/server/server_impl.go b/src/server/server_impl.go index ba704449..dbadbe1e 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -254,7 +254,7 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc ret.router = mux.NewRouter() // setup healthcheck path - ret.health = NewHealthChecker(health.NewServer(), "ratelimit") + ret.health = NewHealthChecker(health.NewServer(), "ratelimit", s.HealthyWithAtLeastOneConfigLoaded) ret.router.Path("/healthcheck").Handler(ret.health) healthpb.RegisterHealthServer(ret.grpcServer, ret.health.Server()) @@ -339,10 +339,6 @@ func (server *server) handleGracefulShutdown() { }() } -func (server *server) HealthCheckFail() { - server.health.Fail() -} - -func (server *server) HealthCheckOK() { - server.health.Ok() +func (server *server) HealthChecker() *HealthChecker { + return server.health } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 0299d3d3..ed6e888f 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -26,6 +26,7 @@ import ( "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/provider" "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/server" ) var tracer = otel.Tracer("ratelimit") @@ -41,6 +42,7 @@ type service struct { config config.RateLimitConfig cache limiter.RateLimitCache stats stats.ServiceStats + health *server.HealthChecker customHeadersEnabled bool customHeaderLimitHeader string customHeaderRemainingHeader string @@ -49,7 +51,7 @@ type service struct { globalShadowMode bool } -func (this *service) setConfig(updateEvent provider.ConfigUpdateEvent) { +func (this *service) setConfig(updateEvent provider.ConfigUpdateEvent, healthyWithAtLeastOneConfigLoad bool) { newConfig, err := updateEvent.GetConfig() if err != nil { configError, ok := err.(config.RateLimitConfigError) @@ -62,6 +64,18 @@ func (this *service) setConfig(updateEvent provider.ConfigUpdateEvent) { return } + if healthyWithAtLeastOneConfigLoad { + err = nil + if !newConfig.IsEmptyDomains() { + err = this.health.Ok(server.ConfigHealthComponentName) + } else { + err = this.health.Fail(server.ConfigHealthComponentName) + } + if err != nil { + logger.Errorf("Unable to update health status: %s", err) + } + } + this.stats.ConfigLoadSuccess.Inc() this.configLock.Lock() @@ -301,7 +315,7 @@ func (this *service) GetCurrentConfig() (config.RateLimitConfig, bool) { } func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitConfigProvider, statsManager stats.Manager, - clock utils.TimeSource, shadowMode, forceStart bool) RateLimitServiceServer { + health *server.HealthChecker, clock utils.TimeSource, shadowMode, forceStart bool, healthyWithAtLeastOneConfigLoad bool) RateLimitServiceServer { newService := &service{ configLock: sync.RWMutex{}, @@ -309,13 +323,14 @@ func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitC config: nil, cache: cache, stats: statsManager.NewServiceStats(), + health: health, globalShadowMode: shadowMode, customHeaderClock: clock, } if !forceStart { logger.Info("Waiting for initial ratelimit config update event") - newService.setConfig(<-newService.configUpdateEvent) + newService.setConfig(<-newService.configUpdateEvent, healthyWithAtLeastOneConfigLoad) logger.Info("Successfully loaded the initial ratelimit configs") } @@ -324,7 +339,7 @@ func NewService(cache limiter.RateLimitCache, configProvider 
provider.RateLimitC logger.Debug("Waiting for config update event") updateEvent := <-newService.configUpdateEvent logger.Debug("Setting config retrieved from config provider") - newService.setConfig(updateEvent) + newService.setConfig(updateEvent, healthyWithAtLeastOneConfigLoad) } }() diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index d4593126..f59ce445 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -119,9 +119,11 @@ func (runner *Runner) Run() { createLimiter(srv, s, localCache, runner.statsManager), srv.Provider(), runner.statsManager, + srv.HealthChecker(), utils.NewTimeSourceImpl(), s.GlobalShadowMode, s.ForceStartWithoutInitialConfig, + s.HealthyWithAtLeastOneConfigLoaded, ) srv.AddDebugHttpEndpoint( diff --git a/src/settings/settings.go b/src/settings/settings.go index b97137ff..593b48d5 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -97,6 +97,9 @@ type Settings struct { // value: remaining seconds HeaderRatelimitReset string `envconfig:"LIMIT_RESET_HEADER" default:"RateLimit-Reset"` + // Health-check settings + HealthyWithAtLeastOneConfigLoaded bool `envconfig:"HEALTHY_WITH_AT_LEAST_ONE_CONFIG_LOADED" default:"false"` + // Redis settings RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` RedisType string `envconfig:"REDIS_TYPE" default:"SINGLE"` diff --git a/test/config/config_test.go b/test/config/config_test.go index 9df082c6..bbab774b 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -30,6 +30,7 @@ func TestBasicConfig(t *testing.T) { stats := stats.NewStore(stats.NewNullSink(), false) rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats), false) rlConfig.Dump() + assert.Equal(rlConfig.IsEmptyDomains(), false) assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{})) assert.Nil(rlConfig.GetLimit(nil, "test-domain", &pb_struct.RateLimitDescriptor{})) diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go index f5989eb7..7875387c 100644 --- a/test/mocks/config/config.go +++ b/test/mocks/config/config.go @@ -66,6 +66,20 @@ func (mr *MockRateLimitConfigMockRecorder) GetLimit(arg0, arg1, arg2 interface{} return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLimit", reflect.TypeOf((*MockRateLimitConfig)(nil).GetLimit), arg0, arg1, arg2) } +// IsEmptyDomains mocks base method +func (m *MockRateLimitConfig) IsEmptyDomains() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsEmptyDomains") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsEmptyDomains indicates an expected call of IsEmptyDomains +func (mr *MockRateLimitConfigMockRecorder) IsEmptyDomains() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsEmptyDomains", reflect.TypeOf((*MockRateLimitConfig)(nil).IsEmptyDomains)) +} + // MockRateLimitConfigLoader is a mock of RateLimitConfigLoader interface type MockRateLimitConfigLoader struct { ctrl *gomock.Controller diff --git a/test/server/health_test.go b/test/server/health_test.go index d9610507..fd0de4bd 100644 --- a/test/server/health_test.go +++ b/test/server/health_test.go @@ -20,7 +20,7 @@ func TestHealthCheck(t *testing.T) { recorder := httptest.NewRecorder() - hc := server.NewHealthChecker(health.NewServer(), "ratelimit") + hc := server.NewHealthChecker(health.NewServer(), "ratelimit", false) r, _ := http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) hc.ServeHTTP(recorder, 
r) @@ -33,7 +33,10 @@ func TestHealthCheck(t *testing.T) { t.Errorf("expected body 'OK', got '%s'", recorder.Body.String()) } - hc.Fail() + err := hc.Fail(server.RedisHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating redis health status") + } recorder = httptest.NewRecorder() @@ -43,13 +46,133 @@ func TestHealthCheck(t *testing.T) { if 500 != recorder.Code { t.Errorf("expected code 500 actual %d", recorder.Code) } + + err = hc.Ok(server.RedisHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating redis health status") + } + + recorder = httptest.NewRecorder() + + r, _ = http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) + hc.ServeHTTP(recorder, r) + + if 200 != recorder.Code { + t.Errorf("expected code 200 actual %d", recorder.Code) + } + + if "OK" != recorder.Body.String() { + t.Errorf("expected body 'OK', got '%s'", recorder.Body.String()) + } +} + +func TestHealthyWithAtLeastOneConfigLoaded(t *testing.T) { + defer signal.Reset(syscall.SIGTERM) + + recorder := httptest.NewRecorder() + + hc := server.NewHealthChecker(health.NewServer(), "ratelimit", true) + + r, _ := http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) + hc.ServeHTTP(recorder, r) + + if 500 != recorder.Code { + t.Errorf("expected code 500 actual %d", recorder.Code) + } + + err := hc.Ok(server.ConfigHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating config health status") + } + + recorder = httptest.NewRecorder() + + r, _ = http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) + hc.ServeHTTP(recorder, r) + + if 200 != recorder.Code { + t.Errorf("expected code 200 actual %d", recorder.Code) + } + + if "OK" != recorder.Body.String() { + t.Errorf("expected body 'OK', got '%s'", recorder.Body.String()) + } + + err = hc.Fail(server.RedisHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating redis health status") + } + + recorder = httptest.NewRecorder() + + r, _ = http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) + hc.ServeHTTP(recorder, r) + + if 500 != recorder.Code { + t.Errorf("expected code 500 actual %d", recorder.Code) + } + + err = hc.Ok(server.RedisHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating redis health status") + } + + recorder = httptest.NewRecorder() + + r, _ = http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) + hc.ServeHTTP(recorder, r) + + if 200 != recorder.Code { + t.Errorf("expected code 200 actual %d", recorder.Code) + } + + if "OK" != recorder.Body.String() { + t.Errorf("expected body 'OK', got '%s'", recorder.Body.String()) + } } func TestGrpcHealthCheck(t *testing.T) { defer signal.Reset(syscall.SIGTERM) grpcHealthServer := health.NewServer() - hc := server.NewHealthChecker(grpcHealthServer, "ratelimit") + hc := server.NewHealthChecker(grpcHealthServer, "ratelimit", false) + healthpb.RegisterHealthServer(grpc.NewServer(), grpcHealthServer) + + req := &healthpb.HealthCheckRequest{ + Service: "ratelimit", + } + + res, _ := grpcHealthServer.Check(context.Background(), req) + if healthpb.HealthCheckResponse_SERVING != res.Status { + t.Errorf("expected status SERVING actual %v", res.Status) + } + + err := hc.Ok(server.RedisHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating redis health status") + } + + res, _ = grpcHealthServer.Check(context.Background(), req) + if healthpb.HealthCheckResponse_SERVING != res.Status { + t.Errorf("expected status SERVING actual %v", res.Status) + } + + err = 
hc.Fail(server.RedisHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating redis health status") + } + + res, _ = grpcHealthServer.Check(context.Background(), req) + if healthpb.HealthCheckResponse_NOT_SERVING != res.Status { + t.Errorf("expected status NOT_SERVING actual %v", res.Status) + } +} + +func TestGrpcHealthyWithAtLeastOneConfigLoaded(t *testing.T) { + defer signal.Reset(syscall.SIGTERM) + + grpcHealthServer := health.NewServer() + hc := server.NewHealthChecker(grpcHealthServer, "ratelimit", true) healthpb.RegisterHealthServer(grpc.NewServer(), grpcHealthServer) req := &healthpb.HealthCheckRequest{ @@ -57,11 +180,32 @@ func TestGrpcHealthCheck(t *testing.T) { } res, _ := grpcHealthServer.Check(context.Background(), req) + if healthpb.HealthCheckResponse_NOT_SERVING != res.Status { + t.Errorf("expected status NOT_SERVING actual %v", res.Status) + } + + err := hc.Ok(server.ConfigHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating config health status") + } + err = hc.Ok(server.RedisHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating redis health status") + } + + res, _ = grpcHealthServer.Check(context.Background(), req) if healthpb.HealthCheckResponse_SERVING != res.Status { t.Errorf("expected status SERVING actual %v", res.Status) } - hc.Fail() + err = hc.Fail(server.ConfigHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating config health status") + } + err = hc.Fail(server.RedisHealthComponentName) + if err != nil { + t.Errorf("Expected no errors for updating redis health status") + } res, _ = grpcHealthServer.Check(context.Background(), req) if healthpb.HealthCheckResponse_NOT_SERVING != res.Status { diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index e90bbed0..ba1595bb 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -3,7 +3,9 @@ package ratelimit_test import ( "math" "os" + "os/signal" "sync" + "syscall" "testing" "github.com/envoyproxy/ratelimit/src/provider" @@ -17,11 +19,15 @@ import ( gostats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" "github.com/envoyproxy/ratelimit/src/trace" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" + server "github.com/envoyproxy/ratelimit/src/server" ratelimit "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/test/common" mock_config "github.com/envoyproxy/ratelimit/test/mocks/config" @@ -65,6 +71,7 @@ type rateLimitServiceTestSuite struct { configUpdateEventChan chan provider.ConfigUpdateEvent configUpdateEvent *mock_provider.MockConfigUpdateEvent config *mock_config.MockRateLimitConfig + health *server.HealthChecker statsManager stats.Manager statStore gostats.Store mockClock utils.TimeSource @@ -88,12 +95,14 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret.config = mock_config.NewMockRateLimitConfig(ret.controller) ret.statStore = gostats.NewStore(gostats.NewNullSink(), false) ret.statsManager = mock_stats.NewMockStatManager(ret.statStore) + ret.health = server.NewHealthChecker(health.NewServer(), "ratelimit", false) return ret } func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitServiceServer { barrier := newBarrier() 
this.configProvider.EXPECT().ConfigUpdateEvent().Return(this.configUpdateEventChan).Times(1) + this.config.EXPECT().IsEmptyDomains().Return(false).AnyTimes() this.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { barrier.signal() return this.config, nil @@ -102,7 +111,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe testSpanExporter.Reset() - svc := ratelimit.NewService(this.cache, this.configProvider, this.statsManager, MockClock{now: int64(2222)}, false, false) + svc := ratelimit.NewService(this.cache, this.configProvider, this.statsManager, this.health, MockClock{now: int64(2222)}, false, false, false) barrier.wait() // wait for initial config load return svc } @@ -502,7 +511,7 @@ func TestInitialLoadError(test *testing.T) { return nil, config.RateLimitConfigError("load error") }) go func() { t.configUpdateEventChan <- t.configUpdateEvent }() // initial config update from provider - service := ratelimit.NewService(t.cache, t.configProvider, t.statsManager, t.mockClock, false, false) + service := ratelimit.NewService(t.cache, t.configProvider, t.statsManager, t.health, t.mockClock, false, false, false) barrier.wait() request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) @@ -578,3 +587,90 @@ func TestServiceTracer(test *testing.T) { t.assert.Len(spanStubs, 1) t.assert.Equal(spanStubs[0].Name, "ShouldRateLimit Execution") } + +func TestServiceHealthStatus(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + defer signal.Reset(syscall.SIGTERM) + + healthyWithAtLeastOneConfigLoaded := false + grpcHealthServer := health.NewServer() + hc := server.NewHealthChecker(grpcHealthServer, "ratelimit", healthyWithAtLeastOneConfigLoaded) + healthpb.RegisterHealthServer(grpc.NewServer(), grpcHealthServer) + + // Set up the service + t.configProvider.EXPECT().ConfigUpdateEvent().Return(t.configUpdateEventChan).Times(1) + _ = ratelimit.NewService(t.cache, t.configProvider, t.statsManager, hc, MockClock{now: int64(2222)}, false, true, healthyWithAtLeastOneConfigLoaded) + + // Health check request + req := &healthpb.HealthCheckRequest{ + Service: "ratelimit", + } + + // Service should report healthy at start. 
+ res, _ := grpcHealthServer.Check(context.Background(), req) + if healthpb.HealthCheckResponse_SERVING != res.Status { + test.Errorf("expected status SERVING actual %v", res.Status) + } +} + +func TestServiceHealthStatusAtLeastOneConfigLoaded(test *testing.T) { + t := commonSetup(test) + barrier := newBarrier() + defer t.controller.Finish() + defer signal.Reset(syscall.SIGTERM) + + healthyWithAtLeastOneConfigLoaded := true + grpcHealthServer := health.NewServer() + hc := server.NewHealthChecker(grpcHealthServer, "ratelimit", healthyWithAtLeastOneConfigLoaded) + healthpb.RegisterHealthServer(grpc.NewServer(), grpcHealthServer) + + // Set up the service + t.configProvider.EXPECT().ConfigUpdateEvent().Return(t.configUpdateEventChan).Times(1) + _ = ratelimit.NewService(t.cache, t.configProvider, t.statsManager, hc, MockClock{now: int64(2222)}, false, true, healthyWithAtLeastOneConfigLoaded) + + // Health check request + req := &healthpb.HealthCheckRequest{ + Service: "ratelimit", + } + + // Service should report unhealthy since no config loaded at start + res, _ := grpcHealthServer.Check(context.Background(), req) + if healthpb.HealthCheckResponse_NOT_SERVING != res.Status { + test.Errorf("expected status NOT_SERVING actual %v", res.Status) + } + + // Force a config load - config event from config provider. + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + return t.config, nil + }) + t.config.EXPECT().IsEmptyDomains().DoAndReturn(func() bool { + barrier.signal() + return false + }).Times(1) + t.configUpdateEventChan <- t.configUpdateEvent + barrier.wait() + + // Service should report healthy since config loaded + res, _ = grpcHealthServer.Check(context.Background(), req) + if healthpb.HealthCheckResponse_SERVING != res.Status { + test.Errorf("expected status SERVING actual %v", res.Status) + } + + // Force reload of an invalid config with no domains - config event from config provider. 
+	t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) {
+		return t.config, nil
+	})
+	t.config.EXPECT().IsEmptyDomains().DoAndReturn(func() bool {
+		barrier.signal()
+		return true
+	}).Times(1)
+	t.configUpdateEventChan <- t.configUpdateEvent
+	barrier.wait()
+
+	// Service should report unhealthy since no config loaded at start
+	res, _ = grpcHealthServer.Check(context.Background(), req)
+	if healthpb.HealthCheckResponse_NOT_SERVING != res.Status {
+		test.Errorf("expected status NOT_SERVING actual %v", res.Status)
+	}
+}

From 008b66acb35b95630b2d10aff17f8a7a5a4174e0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Povilas=20Balzaravi=C4=8Dius?=
Date: Mon, 27 Mar 2023 17:57:40 +0300
Subject: [PATCH 057/181] Add wildcard support to descriptor values (#403)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Povilas Balzaravičius
---
 README.md | 20 +++++++++++++
 src/config/config_impl.go | 24 +++++++++++++---
 test/config/config_test.go | 59 ++++++++++++++++++++++++++++++++++++++
 test/config/wildcard.yaml | 22 ++++++++++++++
 4 files changed, 121 insertions(+), 4 deletions(-)
 create mode 100644 test/config/wildcard.yaml

diff --git a/README.md b/README.md
index ae7880a5..4b608150 100644
--- a/README.md
+++ b/README.md
@@ -27,6 +27,7 @@
 - [Example 6](#example-6)
 - [Example 7](#example-7)
 - [Example 8](#example-8)
+- [Example 9](#example-9)
 - [Loading Configuration](#loading-configuration)
 - [File Based Configuration Loading](#file-based-configuration-loading)
 - [xDS Management Server Based Configuration Loading](#xds-management-server-based-configuration-loading)
@@ -572,6 +573,25 @@ rather than the normal
 "key1"
 "key1_value1"

+#### Example 9
+
+Value supports wildcard matching to apply a rate limit across nested endpoints:
+
+```
+(key1, value1): 20 / min
+(key1, value2): 20 / min
+```
+
+```yaml
+domain: example9
+descriptors:
+  - key: key1
+    value: value*
+    rate_limit:
+      unit: minute
+      requests_per_unit: 20
+```
+
 ## Loading Configuration

 Rate limit service supports the following configuration loading methods. You can define which methods to use by configuring environment variable `CONFIG_TYPE`.

diff --git a/src/config/config_impl.go b/src/config/config_impl.go
index 47ea3633..5fb6a0d1 100644
--- a/src/config/config_impl.go
+++ b/src/config/config_impl.go
@@ -40,8 +40,9 @@ type YamlRoot struct {
 }

 type rateLimitDescriptor struct {
-	descriptors map[string]*rateLimitDescriptor
-	limit       *RateLimit
+	descriptors  map[string]*rateLimitDescriptor
+	limit        *RateLimit
+	wildcardKeys []string
 }

 type rateLimitDomain struct {
@@ -184,9 +185,14 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p
 		logger.Debugf(
 			"loading descriptor: key=%s%s", newParentKey, rateLimitDebugString)
-		newDescriptor := &rateLimitDescriptor{map[string]*rateLimitDescriptor{}, rateLimit}
+		newDescriptor := &rateLimitDescriptor{map[string]*rateLimitDescriptor{}, rateLimit, nil}
 		newDescriptor.loadDescriptors(config, newParentKey+".", descriptorConfig.Descriptors, statsManager)
 		this.descriptors[finalKey] = newDescriptor
+
+		// Preload keys ending with "*" symbol.
+ if finalKey[len(finalKey)-1:] == "*" { + this.wildcardKeys = append(this.wildcardKeys, finalKey) + } } } @@ -256,7 +262,7 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) { } logger.Debugf("loading domain: %s", root.Domain) - newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil}} + newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil, nil}} newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsManager) this.domains[root.Domain] = newDomain } @@ -305,6 +311,16 @@ func (this *rateLimitConfigImpl) GetLimit( finalKey := entry.Key + "_" + entry.Value logger.Debugf("looking up key: %s", finalKey) nextDescriptor := descriptorsMap[finalKey] + + if nextDescriptor == nil && len(value.wildcardKeys) > 0 { + for _, wildcardKey := range value.wildcardKeys { + if strings.HasPrefix(finalKey, strings.TrimSuffix(wildcardKey, "*")) { + nextDescriptor = descriptorsMap[wildcardKey] + break + } + } + } + if nextDescriptor == nil { finalKey = entry.Key logger.Debugf("looking up key: %s", finalKey) diff --git a/test/config/config_test.go b/test/config/config_test.go index bbab774b..e1f57469 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -565,3 +565,62 @@ func TestShadowModeConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.near_limit").Value()) assert.EqualValues(0, stats.NewCounter("test-domain.key2_value2.shadow_mode").Value()) } + +func TestWildcardConfig(t *testing.T) { + assert := assert.New(t) + stats := stats.NewStore(stats.NewNullSink(), false) + rlConfig := config.NewRateLimitConfigImpl(loadFile("wildcard.yaml"), mockstats.NewMockStatManager(stats), false) + rlConfig.Dump() + + // Baseline to show wildcard works like no value + withoutVal1 := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "noVal", Value: "foo1"}}, + }) + withoutVal2 := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "noVal", Value: "foo2"}}, + }) + assert.NotNil(withoutVal1) + assert.Equal(withoutVal1, withoutVal2) + + // Matches multiple wildcard values and results are equal + wildcard1 := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "foo1"}}, + }) + wildcard2 := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "foo2"}}, + }) + assert.NotNil(wildcard1) + assert.Equal(wildcard1, wildcard2) + + // Doesn't match non-matching values + noMatch := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "bar"}}, + }) + assert.Nil(noMatch) + + // Non-wildcard values don't eager match + eager := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "noWild", Value: "foo1"}}, + }) + assert.Nil(eager) + + // Wildcard in the middle of value is not supported. 
+	midWildcard := rlConfig.GetLimit(
+		nil, "test-domain",
+		&pb_struct.RateLimitDescriptor{
+			Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "midWild", Value: "barab"}},
+		})
+	assert.Nil(midWildcard)
+}

diff --git a/test/config/wildcard.yaml b/test/config/wildcard.yaml
new file mode 100644
index 00000000..9562c6cd
--- /dev/null
+++ b/test/config/wildcard.yaml
@@ -0,0 +1,22 @@
+# Basic configuration for testing.
+domain: test-domain
+descriptors:
+  - key: wild
+    value: foo*
+    rate_limit:
+      unit: minute
+      requests_per_unit: 20
+  - key: noWild
+    value: foo
+    rate_limit:
+      unit: minute
+      requests_per_unit: 20
+  - key: noVal
+    rate_limit:
+      unit: minute
+      requests_per_unit: 20
+  - key: midWild
+    value: bar*b
+    rate_limit:
+      unit: minute
+      requests_per_unit: 20

From 542a6047ad6962d34f36cccc486afe2c438fbacc Mon Sep 17 00:00:00 2001
From: Marcin Skalski
Date: Mon, 3 Apr 2023 17:58:48 +0200
Subject: [PATCH 058/181] Add possibility to provide additional headers to grpc
 request in xds sotw provider (#406)

Signed-off-by: Marcin Skalski
---
 README.md | 5 +++++
 src/provider/xds_grpc_sotw_provider.go | 4 +++-
 src/settings/settings.go | 9 +++++----
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 4b608150..8c974a54 100644
--- a/README.md
+++ b/README.md
@@ -665,6 +665,11 @@ As well Ratelimit supports TLS connections, these can be configured using the fo
 2. `CONFIG_GRPC_XDS_CLIENT_TLS_CERT`, `CONFIG_GRPC_XDS_CLIENT_TLS_KEY`, and `CONFIG_GRPC_XDS_SERVER_TLS_CACERT` to provide files to specify a TLS connection configuration to the xDS configuration management server.
 3. `CONFIG_GRPC_XDS_SERVER_TLS_SAN`: (Optional) Override the SAN value to validate from the server certificate.

+When using xDS you can configure extra headers that will be added to GRPC requests to the xDS Management server.
+Extra headers can be useful for providing additional authorization information. This can be configured using the following environment variable:
+
+`CONFIG_GRPC_XDS_CLIENT_ADDITIONAL_HEADERS` - set to a comma-separated list of `header:value` pairs to add multiple headers to GRPC requests.
+
 ## Log Format

 A centralized log collection system works better with logs in json format. JSON format avoids the need for custom parsing rules.

diff --git a/src/provider/xds_grpc_sotw_provider.go b/src/provider/xds_grpc_sotw_provider.go
index 3b891979..fcacc212 100644
--- a/src/provider/xds_grpc_sotw_provider.go
+++ b/src/provider/xds_grpc_sotw_provider.go
@@ -5,6 +5,8 @@ import (
 	"fmt"
 	"strings"

+	"google.golang.org/grpc/metadata"
+
 	corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
 	"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
 	"github.com/golang/protobuf/ptypes/any"
@@ -39,7 +41,7 @@ type XdsGrpcSotwProvider struct {

 // NewXdsGrpcSotwProvider initializes xDS listener and returns the xDS provider.
 func NewXdsGrpcSotwProvider(settings settings.Settings, statsManager stats.Manager) RateLimitConfigProvider {
-	ctx := context.Background()
+	ctx := metadata.NewOutgoingContext(context.Background(), metadata.New(settings.ConfigGrpcXdsClientAdditionalHeaders))
 	p := &XdsGrpcSotwProvider{
 		settings:     settings,
 		statsManager: statsManager,

diff --git a/src/settings/settings.go b/src/settings/settings.go
index 593b48d5..47a96c24 100644
--- a/src/settings/settings.go
+++ b/src/settings/settings.go
@@ -54,10 +54,11 @@ type Settings struct {

 	// xDS rate limit configuration
 	// ConfigGrpcXdsNodeId is the Node ID.
xDS server should set snapshots to this Node ID - ConfigGrpcXdsNodeId string `envconfig:"CONFIG_GRPC_XDS_NODE_ID" default:"default"` - ConfigGrpcXdsNodeMetadata string `envconfig:"CONFIG_GRPC_XDS_NODE_METADATA" default:""` // eg: "key1:val1,key2=val2" - ConfigGrpcXdsServerUrl string `envconfig:"CONFIG_GRPC_XDS_SERVER_URL" default:"localhost:18000"` - ConfigGrpcXdsServerConnectRetryInterval time.Duration `envconfig:"CONFIG_GRPC_XDS_SERVER_CONNECT_RETRY_INTERVAL" default:"3s"` + ConfigGrpcXdsNodeId string `envconfig:"CONFIG_GRPC_XDS_NODE_ID" default:"default"` + ConfigGrpcXdsNodeMetadata string `envconfig:"CONFIG_GRPC_XDS_NODE_METADATA" default:""` // eg: "key1:val1,key2=val2" + ConfigGrpcXdsServerUrl string `envconfig:"CONFIG_GRPC_XDS_SERVER_URL" default:"localhost:18000"` + ConfigGrpcXdsServerConnectRetryInterval time.Duration `envconfig:"CONFIG_GRPC_XDS_SERVER_CONNECT_RETRY_INTERVAL" default:"3s"` + ConfigGrpcXdsClientAdditionalHeaders map[string]string `envconfig:"CONFIG_GRPC_XDS_CLIENT_ADDITIONAL_HEADERS" default:""` // xDS config server TLS configurations ConfigGrpcXdsTlsConfig *tls.Config From 58b9f5be2e6710d431f96d6d94922e063ba797af Mon Sep 17 00:00:00 2001 From: Peter Leng Date: Tue, 4 Apr 2023 09:02:36 -0700 Subject: [PATCH 059/181] Fix flaky health check tests (#408) Signed-off-by: yleng --- src/service/ratelimit.go | 7 ++++--- test/service/ratelimit_test.go | 21 ++++++--------------- 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index ed6e888f..e918d9bb 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -34,6 +34,7 @@ var tracer = otel.Tracer("ratelimit") type RateLimitServiceServer interface { pb.RateLimitServiceServer GetCurrentConfig() (config.RateLimitConfig, bool) + SetConfig(updateEvent provider.ConfigUpdateEvent, healthyWithAtLeastOneConfigLoad bool) } type service struct { @@ -51,7 +52,7 @@ type service struct { globalShadowMode bool } -func (this *service) setConfig(updateEvent provider.ConfigUpdateEvent, healthyWithAtLeastOneConfigLoad bool) { +func (this *service) SetConfig(updateEvent provider.ConfigUpdateEvent, healthyWithAtLeastOneConfigLoad bool) { newConfig, err := updateEvent.GetConfig() if err != nil { configError, ok := err.(config.RateLimitConfigError) @@ -330,7 +331,7 @@ func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitC if !forceStart { logger.Info("Waiting for initial ratelimit config update event") - newService.setConfig(<-newService.configUpdateEvent, healthyWithAtLeastOneConfigLoad) + newService.SetConfig(<-newService.configUpdateEvent, healthyWithAtLeastOneConfigLoad) logger.Info("Successfully loaded the initial ratelimit configs") } @@ -339,7 +340,7 @@ func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitC logger.Debug("Waiting for config update event") updateEvent := <-newService.configUpdateEvent logger.Debug("Setting config retrieved from config provider") - newService.setConfig(updateEvent, healthyWithAtLeastOneConfigLoad) + newService.SetConfig(updateEvent, healthyWithAtLeastOneConfigLoad) } }() diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index ba1595bb..4d2cde53 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -616,7 +616,6 @@ func TestServiceHealthStatus(test *testing.T) { func TestServiceHealthStatusAtLeastOneConfigLoaded(test *testing.T) { t := commonSetup(test) - barrier := newBarrier() defer t.controller.Finish() defer 
signal.Reset(syscall.SIGTERM) @@ -627,8 +626,10 @@ func TestServiceHealthStatusAtLeastOneConfigLoaded(test *testing.T) { // Set up the service t.configProvider.EXPECT().ConfigUpdateEvent().Return(t.configUpdateEventChan).Times(1) - _ = ratelimit.NewService(t.cache, t.configProvider, t.statsManager, hc, MockClock{now: int64(2222)}, false, true, healthyWithAtLeastOneConfigLoaded) - + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + return t.config, nil + }).Times(2) + service := ratelimit.NewService(t.cache, t.configProvider, t.statsManager, hc, MockClock{now: int64(2222)}, false, true, healthyWithAtLeastOneConfigLoaded) // Health check request req := &healthpb.HealthCheckRequest{ Service: "ratelimit", @@ -641,15 +642,10 @@ func TestServiceHealthStatusAtLeastOneConfigLoaded(test *testing.T) { } // Force a config load - config event from config provider. - t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { - return t.config, nil - }) t.config.EXPECT().IsEmptyDomains().DoAndReturn(func() bool { - barrier.signal() return false }).Times(1) - t.configUpdateEventChan <- t.configUpdateEvent - barrier.wait() + service.SetConfig(t.configUpdateEvent, healthyWithAtLeastOneConfigLoaded) // Service should report healthy since config loaded res, _ = grpcHealthServer.Check(context.Background(), req) @@ -658,15 +654,10 @@ func TestServiceHealthStatusAtLeastOneConfigLoaded(test *testing.T) { } // Force reload of an invalid config with no domains - config event from config provider. - t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { - return t.config, nil - }) t.config.EXPECT().IsEmptyDomains().DoAndReturn(func() bool { - barrier.signal() return true }).Times(1) - t.configUpdateEventChan <- t.configUpdateEvent - barrier.wait() + service.SetConfig(t.configUpdateEvent, healthyWithAtLeastOneConfigLoaded) // Service should report unhealthy since no config loaded at start res, _ = grpcHealthServer.Check(context.Background(), req) From 3e7b643223042675354a851a1f6b7ad21581488f Mon Sep 17 00:00:00 2001 From: seanwinterberger <45638480+seanwinterberger@users.noreply.github.com> Date: Fri, 14 Apr 2023 10:09:30 -0500 Subject: [PATCH 060/181] add flag to skip hostname verification for redis connection (#409) Signed-off-by: Sean Winterberger --- README.md | 1 + src/client_cmd/main.go | 2 +- src/settings/settings.go | 13 ++++++----- src/utils/tls.go | 33 ++++++++++++++++++++++++++-- test/integration/integration_test.go | 3 ++- 5 files changed, 42 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 8c974a54..82f7e9d2 100644 --- a/README.md +++ b/README.md @@ -899,6 +899,7 @@ As well Ratelimit supports TLS connections and authentication. These can be conf 1. `REDIS_TLS` & `REDIS_PERSECOND_TLS`: set to `"true"` to enable a TLS connection for the specific connection type. 1. `REDIS_TLS_CLIENT_CERT`, `REDIS_TLS_CLIENT_KEY`, and `REDIS_TLS_CACERT` to provides files to specify a TLS connection configuration to Redis server that requires client certificate verification. (This is effective when `REDIS_TLS` or `REDIS_PERSECOND_TLS` is set to to `"true"`). +1. `REDIS_TLS_SKIP_HOSTNAME_VERIFICATION` set to `"true"` will skip hostname verification in environments where the certificate has an invalid hostname, such as GCP Memorystore. 1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable password-only authentication to the redis host. 1. 
`REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"username:password"` to enable username-password authentication to the redis host. 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys diff --git a/src/client_cmd/main.go b/src/client_cmd/main.go index 57a25e50..a3750f99 100644 --- a/src/client_cmd/main.go +++ b/src/client_cmd/main.go @@ -92,7 +92,7 @@ func main() { grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), } if *grpcUseTLS { - tlsConfig := utils.TlsConfigFromFiles(*grpcTlsCertFile, *grpcTlsKeyFile, *grpcServerTlsCACert, utils.ServerCA) + tlsConfig := utils.TlsConfigFromFiles(*grpcTlsCertFile, *grpcTlsKeyFile, *grpcServerTlsCACert, utils.ServerCA, false) dialOptions = append(dialOptions, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) } else { dialOptions = append(dialOptions, grpc.WithInsecure()) diff --git a/src/settings/settings.go b/src/settings/settings.go index 47a96c24..138d73e9 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -111,9 +111,10 @@ type Settings struct { // TODO: Make this setting configurable out of the box instead of having to provide it through code. RedisTlsConfig *tls.Config // Allow to set the client certificate and key for TLS connections. - RedisTlsClientCert string `envconfig:"REDIS_TLS_CLIENT_CERT" default:""` - RedisTlsClientKey string `envconfig:"REDIS_TLS_CLIENT_KEY" default:""` - RedisTlsCACert string `envconfig:"REDIS_TLS_CACERT" default:""` + RedisTlsClientCert string `envconfig:"REDIS_TLS_CLIENT_CERT" default:""` + RedisTlsClientKey string `envconfig:"REDIS_TLS_CLIENT_KEY" default:""` + RedisTlsCACert string `envconfig:"REDIS_TLS_CACERT" default:""` + RedisTlsSkipHostnameVerification bool `envconfig:"REDIS_TLS_SKIP_HOSTNAME_VERIFICATION" default:"false"` // RedisPipelineWindow sets the duration after which internal pipelines will be flushed. // If window is zero then implicit pipelining will be disabled. Radix use 150us for the @@ -187,7 +188,7 @@ func RedisTlsConfig(redisTls bool) Option { // so let's just initialize to what we want the correct value to be. 
s.RedisTlsConfig = &tls.Config{} if redisTls { - s.RedisTlsConfig = utils.TlsConfigFromFiles(s.RedisTlsClientCert, s.RedisTlsClientKey, s.RedisTlsCACert, utils.ServerCA) + s.RedisTlsConfig = utils.TlsConfigFromFiles(s.RedisTlsClientCert, s.RedisTlsClientKey, s.RedisTlsCACert, utils.ServerCA, s.RedisTlsSkipHostnameVerification) } } } @@ -195,7 +196,7 @@ func RedisTlsConfig(redisTls bool) Option { func GrpcServerTlsConfig() Option { return func(s *Settings) { if s.GrpcServerUseTLS { - grpcServerTlsConfig := utils.TlsConfigFromFiles(s.GrpcServerTlsCert, s.GrpcServerTlsKey, s.GrpcClientTlsCACert, utils.ClientCA) + grpcServerTlsConfig := utils.TlsConfigFromFiles(s.GrpcServerTlsCert, s.GrpcServerTlsKey, s.GrpcClientTlsCACert, utils.ClientCA, false) if s.GrpcClientTlsCACert != "" { grpcServerTlsConfig.ClientAuth = tls.RequireAndVerifyClientCert } else { @@ -209,7 +210,7 @@ func GrpcServerTlsConfig() Option { func ConfigGrpcXdsServerTlsConfig() Option { return func(s *Settings) { if s.ConfigGrpcXdsServerUseTls { - configGrpcXdsServerTlsConfig := utils.TlsConfigFromFiles(s.ConfigGrpcXdsClientTlsCert, s.ConfigGrpcXdsClientTlsKey, s.ConfigGrpcXdsServerTlsCACert, utils.ServerCA) + configGrpcXdsServerTlsConfig := utils.TlsConfigFromFiles(s.ConfigGrpcXdsClientTlsCert, s.ConfigGrpcXdsClientTlsKey, s.ConfigGrpcXdsServerTlsCACert, utils.ServerCA, false) if s.ConfigGrpcXdsServerTlsCACert != "" { configGrpcXdsServerTlsConfig.ClientAuth = tls.RequireAndVerifyClientCert } else { diff --git a/src/utils/tls.go b/src/utils/tls.go index 76085e22..3e200757 100644 --- a/src/utils/tls.go +++ b/src/utils/tls.go @@ -3,6 +3,7 @@ package utils import ( "crypto/tls" "crypto/x509" + "errors" "fmt" "os" ) @@ -15,8 +16,35 @@ const ( ) // TlsConfigFromFiles sets the TLS config from the provided files. 
-func TlsConfigFromFiles(certFile, keyFile, caCertFile string, caType CAType) *tls.Config { - config := &tls.Config{} +func TlsConfigFromFiles(certFile, keyFile, caCertFile string, caType CAType, skipHostnameVerification bool) *tls.Config { + config := &tls.Config{ + InsecureSkipVerify: skipHostnameVerification, + } + if skipHostnameVerification { + // Based upon https://github.com/golang/go/blob/d67d044310bc5cc1c26b60caf23a58602e9a1946/src/crypto/tls/example_test.go#L187 + config.VerifyPeerCertificate = func(certificates [][]byte, verifiedChains [][]*x509.Certificate) error { + certs := make([]*x509.Certificate, len(certificates)) + for i, asn1Data := range certificates { + cert, err := x509.ParseCertificate(asn1Data) + if err != nil { + return errors.New("tls: failed to parse certificate from server: " + err.Error()) + } + certs[i] = cert + } + + opts := x509.VerifyOptions{ + Roots: config.RootCAs, + DNSName: "", + Intermediates: x509.NewCertPool(), + } + for _, cert := range certs[1:] { + opts.Intermediates.AddCert(cert) + } + _, err := certs[0].Verify(opts) + return err + } + } + if certFile != "" && keyFile != "" { tlsKeyPair, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { @@ -40,6 +68,7 @@ func TlsConfigFromFiles(certFile, keyFile, caCertFile string, caType CAType) *tl config.RootCAs = certPool } } + return config } diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index ad9a7373..c8861a3a 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -261,6 +261,7 @@ func Test_mTLS(t *testing.T) { s.RedisTlsConfig = &tls.Config{} s.RedisAuth = "password123" s.RedisTls = true + s.RedisTlsSkipHostnameVerification = false s.RedisPerSecondAuth = "password123" s.RedisPerSecondTls = true assert := assert.New(t) @@ -276,7 +277,7 @@ func Test_mTLS(t *testing.T) { settings.GrpcServerTlsConfig()(&s) runner := startTestRunner(t, s) defer runner.Stop() - clientTlsConfig := utils.TlsConfigFromFiles(clientCertFile, clientCertKey, serverCAFile, utils.ServerCA) + clientTlsConfig := utils.TlsConfigFromFiles(clientCertFile, clientCertKey, serverCAFile, utils.ServerCA, false) conn, err := grpc.Dial(fmt.Sprintf("localhost:%v", s.GrpcPort), grpc.WithTransportCredentials(credentials.NewTLS(clientTlsConfig))) assert.NoError(err) defer conn.Close() From 278a7c2a9a18a2fe101ff02090c00bdc4904181f Mon Sep 17 00:00:00 2001 From: Arko Dasgupta Date: Wed, 19 Apr 2023 10:31:32 -0700 Subject: [PATCH 061/181] Bump to golang 1.20 (#410) Signed-off-by: Arko Dasgupta --- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- Dockerfile | 2 +- Dockerfile.integration | 2 +- docker-compose.yml | 4 ++-- examples/xds-sotw-config-server/go.mod | 2 +- go.mod | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 10fd7468..ee51db57 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -46,7 +46,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "1.18" + go-version: "1.20" - name: run pre-commits run: | diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 527b4523..93d9dbf7 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -34,7 +34,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "1.18" + go-version: "1.20" - name: run pre-commits run: | diff --git a/Dockerfile b/Dockerfile index 685af6d1..5c6fb7af 100644 --- a/Dockerfile 
+++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18 AS build +FROM golang:1.20 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index f1ef81ff..821f1c46 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang:1.18 +FROM golang:1.20 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* diff --git a/docker-compose.yml b/docker-compose.yml index ad92837d..bd5807ce 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,7 +20,7 @@ services: # minimal container that builds the ratelimit service binary and exits. ratelimit-build: - image: golang:1.18-alpine + image: golang:1.20-alpine working_dir: /go/src/github.com/envoyproxy/ratelimit command: go build -o /usr/local/bin/ratelimit ./src/service_cmd/main.go volumes: @@ -28,7 +28,7 @@ services: - binary:/usr/local/bin/ ratelimit-client-build: - image: golang:1.18-alpine + image: golang:1.20-alpine working_dir: /go/src/github.com/envoyproxy/ratelimit command: go build -o /usr/local/bin/ratelimit_client ./src/client_cmd/main.go volumes: diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index 7c5ac4cd..8e282038 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -1,6 +1,6 @@ module github.com/envoyproxy/ratelimit/examples/xds-sotw-config-server -go 1.18 +go 1.20 require ( github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f diff --git a/go.mod b/go.mod index 3ed66726..2c51a68a 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/envoyproxy/ratelimit -go 1.18 +go 1.20 require ( github.com/alicebob/miniredis/v2 v2.23.0 From 42dc463c2911017f5c0101c22d71c537c25f6543 Mon Sep 17 00:00:00 2001 From: Vikas Palaskar Date: Wed, 19 Apr 2023 10:36:50 -0700 Subject: [PATCH 062/181] Use 0.7.0 version for golang.org/x/net to fix the vulnerability CVE-2022-41723 (#416) Signed-off-by: Vikas Palaskar --- examples/xds-sotw-config-server/go.mod | 8 +++++--- examples/xds-sotw-config-server/go.sum | 14 ++++++++++++++ go.mod | 6 +++--- go.sum | 6 ++++++ 4 files changed, 28 insertions(+), 6 deletions(-) diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index 8e282038..218bb048 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -12,9 +12,11 @@ require ( github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect github.com/golang/protobuf v1.5.2 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect + github.com/sirupsen/logrus v1.6.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum index d82360b9..7382fd4f 100644 --- a/examples/xds-sotw-config-server/go.sum +++ b/examples/xds-sotw-config-server/go.sum @@ -7,6 +7,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/xds/go 
v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f h1:nqACgqiYlDnB0znidh+8uhnQVLeqfW5NyyRfnGibowc= github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= @@ -23,8 +24,14 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -37,17 +44,24 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/go.mod b/go.mod index 2c51a68a..b871c233 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.6.0 github.com/stretchr/testify v1.8.1 - golang.org/x/net v0.4.0 + golang.org/x/net v0.7.0 google.golang.org/grpc v1.52.0 google.golang.org/protobuf v1.28.1 gopkg.in/yaml.v2 v2.3.0 @@ -56,6 +56,6 @@ require ( go.opentelemetry.io/otel/sdk v1.7.0 go.opentelemetry.io/otel/trace v1.7.0 go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect ) diff --git a/go.sum b/go.sum index efe276bd..ad5f5b8e 100644 --- a/go.sum +++ b/go.sum @@ -323,6 +323,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -375,6 +377,8 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -384,6 +388,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.5.0 
h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From e2a87f41d3a78d54eda197b3d49fcdcbca9a4f48 Mon Sep 17 00:00:00 2001 From: Daniel Hoelbling-Inzko Date: Thu, 27 Apr 2023 16:24:04 +0200 Subject: [PATCH 063/181] Fixed otel typo in (#417) Signed-off-by: Daniel Hoelbling-Inzko --- src/redis/fixed_cache_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index a61b319f..16a085bf 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -19,7 +19,7 @@ import ( "github.com/envoyproxy/ratelimit/src/utils" ) -var tracer = otel.Tracer("redis.fixedCacaheImpl") +var tracer = otel.Tracer("redis.fixedCacheImpl") type fixedRateLimitCacheImpl struct { client Client From b3562caaa3393766cebeb69e34281d3565154c56 Mon Sep 17 00:00:00 2001 From: Amiram Wingarten Date: Fri, 12 May 2023 06:35:15 +0300 Subject: [PATCH 064/181] fix wildcard support to cover nested descriptors (#420) Signed-off-by: Amiram Wingarten --- src/config/config_impl.go | 6 ++++-- test/config/config_test.go | 6 ++++++ test/config/wildcard.yaml | 8 ++++++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 5fb6a0d1..1a537951 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -305,6 +305,7 @@ func (this *rateLimitConfigImpl) GetLimit( } descriptorsMap := value.descriptors + prevDescriptor := &value.rateLimitDescriptor for i, entry := range descriptor.Entries { // First see if key_value is in the map. If that isn't in the map we look for just key // to check for a default value. 
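For reference, the wildcard lookup that this change generalizes to nested descriptors reduces to a prefix comparison on the composed `key_value` lookup key. The sketch below is a hypothetical standalone helper, not the actual GetLimit code (which walks the nested descriptor map and, after this patch, consults the previously matched descriptor's wildcardKeys):

```
package main

import (
	"fmt"
	"strings"
)

// matchWildcard reports which configured wildcard key, if any, matches the
// descriptor lookup key. A configured key ending in "*" matches every lookup
// key that shares its prefix. Hypothetical helper for illustration only.
func matchWildcard(finalKey string, wildcardKeys []string) (string, bool) {
	for _, wildcardKey := range wildcardKeys {
		if strings.HasPrefix(finalKey, strings.TrimSuffix(wildcardKey, "*")) {
			return wildcardKey, true
		}
	}
	return "", false
}

func main() {
	// Mirrors the nested case added to wildcard.yaml below: key "wild" with
	// value "goo*" is stored under the map key "wild_goo*", and the request
	// entry {Key: "wild", Value: "goo2"} yields the lookup key "wild_goo2".
	fmt.Println(matchWildcard("wild_goo2", []string{"wild_goo*"})) // wild_goo* true
}
```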
@@ -312,8 +313,8 @@ func (this *rateLimitConfigImpl) GetLimit( logger.Debugf("looking up key: %s", finalKey) nextDescriptor := descriptorsMap[finalKey] - if nextDescriptor == nil && len(value.wildcardKeys) > 0 { - for _, wildcardKey := range value.wildcardKeys { + if nextDescriptor == nil && len(prevDescriptor.wildcardKeys) > 0 { + for _, wildcardKey := range prevDescriptor.wildcardKeys { if strings.HasPrefix(finalKey, strings.TrimSuffix(wildcardKey, "*")) { nextDescriptor = descriptorsMap[wildcardKey] break @@ -347,6 +348,7 @@ func (this *rateLimitConfigImpl) GetLimit( break } + prevDescriptor = nextDescriptor } return rateLimit diff --git a/test/config/config_test.go b/test/config/config_test.go index e1f57469..ed131110 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -597,8 +597,14 @@ func TestWildcardConfig(t *testing.T) { &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "foo2"}}, }) + wildcard3 := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "nestedWild", Value: "val1"}, {Key: "wild", Value: "goo2"}}, + }) assert.NotNil(wildcard1) assert.Equal(wildcard1, wildcard2) + assert.NotNil(wildcard3) // Doesn't match non-matching values noMatch := rlConfig.GetLimit( diff --git a/test/config/wildcard.yaml b/test/config/wildcard.yaml index 9562c6cd..e99bac66 100644 --- a/test/config/wildcard.yaml +++ b/test/config/wildcard.yaml @@ -20,3 +20,11 @@ descriptors: rate_limit: unit: minute requests_per_unit: 20 + - key: nestedWild + value: val1 + descriptors: + - key: wild + value: goo* + rate_limit: + unit: minute + requests_per_unit: 20 From 8f6a200742caed7ce52c9530e5c8d540001b7851 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Wed, 24 May 2023 10:40:13 -0400 Subject: [PATCH 065/181] [docs] fix spelling errors in the readme (#425) Signed-off-by: Raul Gutierrez Segales --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 82f7e9d2..7c9fb6a2 100644 --- a/README.md +++ b/README.md @@ -295,7 +295,7 @@ There is also a Global Shadow Mode ### Including detailed metrics for unspecified values -Setting the `detailed_metric: true` for a descriptor will extend the metrics that are produced. Normally a desriptor that matches a value that is not explicitly listed in the configuration will from a metrics point-of-view be rolled-up into the base entry. This can be probelmatic if you want to have those details available for analysis. +Setting the `detailed_metric: true` for a descriptor will extend the metrics that are produced. Normally a descriptor that matches a value that is not explicitly listed in the configuration will from a metrics point-of-view be rolled-up into the base entry. This can be problematic if you want to have those details available for analysis. NB! This should only be enabled in situations where the potentially large cardinality of metrics that this can lead to is acceptable. @@ -540,7 +540,7 @@ descriptors: In this example we demonstrate how a descriptor without a specified value is configured to override the default behavior and include the matched-value in the metrics. 
-Rate limting configuration and tracking works as normally +Rate limiting configuration and tracking works as normally ``` (key_1, unspecified_value): 10 / sec From ce3d747991afdd36e7c95f1694fc98fa670dc613 Mon Sep 17 00:00:00 2001 From: Seonghyun Oh Date: Wed, 31 May 2023 00:07:05 +0900 Subject: [PATCH 066/181] Increment shadow metric regardless of local cache (#424) Signed-off-by: Seonghyun Oh --- src/limiter/base_limiter.go | 22 ++++++++++++++++------ src/redis/fixed_cache_impl.go | 4 +--- test/redis/fixed_cache_impl_test.go | 13 ++++++++----- 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index f1a7de22..4024cc60 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -80,9 +80,9 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * nil, 0) } var responseDescriptorStatus *pb.RateLimitResponse_DescriptorStatus - over_limit := false + isOverLimit := false if isOverLimitWithLocalCache { - over_limit = true + isOverLimit = true limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) limitInfo.limit.Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, @@ -94,7 +94,7 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * limitInfo.nearLimitThreshold = uint32(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio))) logger.Debugf("cache key: %s current: %d", key, limitInfo.limitAfterIncrease) if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold { - over_limit = true + isOverLimit = true responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) @@ -124,11 +124,11 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * } // If the limit is in ShadowMode, it should be always return OK - // We only want to increase stats if the limit was actually over the limit - if over_limit && limitInfo.limit.ShadowMode { + if isOverLimit && limitInfo.limit.ShadowMode { logger.Debugf("Limit with key %s, is in shadow_mode", limitInfo.limit.FullKey) responseDescriptorStatus.Code = pb.RateLimitResponse_OK - limitInfo.limit.Stats.ShadowMode.Add(uint64(hitsAddend)) + // Increase shadow mode stats if the limit was actually over the limit + this.increaseShadowModeStats(isOverLimitWithLocalCache, limitInfo, hitsAddend) } return responseDescriptorStatus @@ -178,6 +178,16 @@ func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsA } } +func (this *BaseRateLimiter) increaseShadowModeStats(isOverLimitWithLocalCache bool, limitInfo *LimitInfo, hitsAddend uint32) { + // Increase shadow mode statistics. For the same reason as over limit stats, + // if the limit value before adding the N hits over the limit, then all N hits were over limit. 
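+ // For example, with overLimitThreshold = 10, limitBeforeIncrease = 8 and
+ // hitsAddend = 4: limitAfterIncrease is 12, so 12 - 10 = 2 of the 4 hits
+ // count toward shadow mode; had limitBeforeIncrease already been 10 or
+ // more, all 4 hits would count.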
+ if isOverLimitWithLocalCache || limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { + limitInfo.limit.Stats.ShadowMode.Add(uint64(hitsAddend)) + } else { + limitInfo.limit.Stats.ShadowMode.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold)) + } +} + func (this *BaseRateLimiter) generateResponseDescriptorStatus(responseCode pb.RateLimitResponse_Code, limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32) *pb.RateLimitResponse_DescriptorStatus { if limit != nil { diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 16a085bf..2dbb04ff 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -61,14 +61,12 @@ func (this *fixedRateLimitCacheImpl) DoLimit( // Check if key is over the limit in local cache. if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { - if limits[i].ShadowMode { logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) } else { logger.Debugf("cache key is over the limit: %s", cacheKey.Key) - isOverLimitWithLocalCache[i] = true } - + isOverLimitWithLocalCache[i] = true continue } diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index eb025ab5..cbbf33b1 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -564,6 +564,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.ShadowMode.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) @@ -579,15 +580,17 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { // The result should be OK since limit is in ShadowMode assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, }, cache.DoLimit(context.Background(), request, limits)) - // TODO: How should we handle statistics? Should there be a separate ShadowMode statistics? Should the other Stats remain as if they were unaffected by shadowmode? + + // Even if you hit the local cache, other metrics should increase normally. assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) - assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(2), limits[0].Stats.ShadowMode.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(3), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) // Check the local cache stats. 
testLocalCacheStats(localCacheStats, statsStore, sink, 1, 3, 4, 0, 1) From 59565c8730687f012b93bf551c7f2a427fd6a19f Mon Sep 17 00:00:00 2001 From: Dong Liu Date: Fri, 2 Jun 2023 07:19:12 +0800 Subject: [PATCH 067/181] Bump alpine base image (#426) For CVE fix, see https://github.com/alpinelinux/docker-alpine/issues/321 Signed-off-by: Dong Liu --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 5c6fb7af..ec874439 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.16 AS final +FROM alpine:3.18 AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit From c97749cc2d2d9e453aa7b7fc08b86cbe22a031e8 Mon Sep 17 00:00:00 2001 From: pawel-docu <136368481+pawel-docu@users.noreply.github.com> Date: Wed, 14 Jun 2023 11:14:51 -0700 Subject: [PATCH 068/181] Bump version of net, sys, and text modules. (#432) Signed-off-by: Pawel Lipiec --- go.mod | 6 +++--- go.sum | 18 ++++++------------ 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index b871c233..825101d6 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.6.0 github.com/stretchr/testify v1.8.1 - golang.org/x/net v0.7.0 + golang.org/x/net v0.10.0 google.golang.org/grpc v1.52.0 google.golang.org/protobuf v1.28.1 gopkg.in/yaml.v2 v2.3.0 @@ -56,6 +56,6 @@ require ( go.opentelemetry.io/otel/sdk v1.7.0 go.opentelemetry.io/otel/trace v1.7.0 go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect ) diff --git a/go.sum b/go.sum index ad5f5b8e..81fef458 100644 --- a/go.sum +++ b/go.sum @@ -321,10 +321,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -375,10 +373,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -386,10 +382,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From e059638dbe11190264f06eb1425700cfae8728f4 Mon Sep 17 00:00:00 2001 From: Jonas-Taha El Sesiy Date: Tue, 27 Jun 2023 03:29:37 +0200 Subject: [PATCH 069/181] Bump dependencies & docker base images (#434) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During a recent CVE scan we found envoyproxy to use `alpine:3.18` as the final image ``` grype envoyproxy/ratelimit:59565c87 ✔ Vulnerability DB [no update available] ✔ Pulled image ✔ Loaded image ✔ Parsed image ✔ Cataloged packages [57 packages] ✔ Scanning image... [4 vulnerabilities] ├── 0 critical, 3 high, 1 medium, 0 low, 0 negligible └── 2 fixed NAME INSTALLED FIXED-IN TYPE VULNERABILITY SEVERITY libcrypto3 3.1.0-r4 3.1.1-r0 apk CVE-2023-2650 High libssl3 3.1.0-r4 3.1.1-r0 apk CVE-2023-2650 High ``` Since docker image tags are derived from the git commit sha, triggering a rebuild of the image as is (which implicitly would use alpine:3.18.2 and golang:1.20.5) would result in the image getting replaced with the same commit sha. Instead, we're explicitly setting the version numbers to ensure any version update is tied to a commit. 
Signed-off-by: Jonas-Taha El Sesiy --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index ec874439..6ae75660 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20 AS build +FROM golang:1.20.5 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.18 AS final +FROM alpine:3.18.2 AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit From 965f0bc861da0d4478071aeca0a5f2c2202257b7 Mon Sep 17 00:00:00 2001 From: Chashika Weerathunga Date: Fri, 11 Aug 2023 21:27:21 +0530 Subject: [PATCH 070/181] Fix incrementing cache keys when overall status is over limited (#430) Signed-off-by: chashikajw Signed-off-by: Tharsanan1 --- src/redis/fixed_cache_impl.go | 13 +++++++++++-- test/integration/integration_test.go | 5 +++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 2dbb04ff..6f134d53 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -53,6 +53,8 @@ func (this *fixedRateLimitCacheImpl) DoLimit( results := make([]uint32, len(request.Descriptors)) var pipeline, perSecondPipeline Pipeline + hitAddendForRedis := hitsAddend + overlimitIndex := -1 // Now, actually setup the pipeline, skipping empty cache keys. for i, cacheKey := range cacheKeys { if cacheKey.Key == "" { @@ -67,6 +69,13 @@ func (this *fixedRateLimitCacheImpl) DoLimit( logger.Debugf("cache key is over the limit: %s", cacheKey.Key) } isOverLimitWithLocalCache[i] = true + hitAddendForRedis = 0 + overlimitIndex = i + continue + } + } + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" || overlimitIndex == i { continue } @@ -82,12 +91,12 @@ func (this *fixedRateLimitCacheImpl) DoLimit( if perSecondPipeline == nil { perSecondPipeline = Pipeline{} } - pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitAddendForRedis, &results[i], expirationSeconds) } else { if pipeline == nil { pipeline = Pipeline{} } - pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + pipelineAppend(this.client, &pipeline, cacheKey.Key, hitAddendForRedis, &results[i], expirationSeconds) } } diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index c8861a3a..c951d1ab 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -592,6 +592,11 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) { if i >= 10 { status = pb.RateLimitResponse_OVER_LIMIT limitRemaining2 = 0 + // Ceased incrementing cached keys upon exceeding the overall rate limit in the Redis cache flow. + // Consequently, the remaining limit should remain unaltered. 
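+ // (Once the local cache marks one key as over the limit, the remaining keys
+ // are incremented by zero, so their observed remaining counts stop moving.)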
+ if enable_local_cache && s.BackendType != "memcache" { + limitRemaining1 = 9 + } } durRemaining1 := response.GetStatuses()[0].DurationUntilReset durRemaining2 := response.GetStatuses()[1].DurationUntilReset From b1f66f2d8bacde2d5ec2bd7573fcccb3b5c45204 Mon Sep 17 00:00:00 2001 From: Charlie <104772503+ChuckCrawford@users.noreply.github.com> Date: Sat, 9 Sep 2023 20:28:16 -0400 Subject: [PATCH 071/181] Updates Golang minor version to resolve CVEs (#441) Signed-off-by: Charlie Crawford --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 6ae75660..c4d4c85a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.5 AS build +FROM golang:1.20.7 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org From e0f9f0e4922904e1490b21b19436137e9802af72 Mon Sep 17 00:00:00 2001 From: Chashika Weerathunga Date: Thu, 14 Sep 2023 07:40:54 +0530 Subject: [PATCH 072/181] Fix ratelimit counter issue when using multiple descriptors (#443) Signed-off-by: chashikajw --- README.md | 6 ++ src/limiter/base_limiter.go | 8 ++ src/redis/cache_impl.go | 1 + src/redis/fixed_cache_impl.go | 133 ++++++++++++++++++++------ src/settings/settings.go | 11 ++- test/integration/integration_test.go | 15 ++- test/redis/bench_test.go | 2 +- test/redis/fixed_cache_impl_test.go | 134 +++++++++++++++++++++++++-- 8 files changed, 269 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index 7c9fb6a2..cf2bf649 100644 --- a/README.md +++ b/README.md @@ -904,6 +904,12 @@ As well Ratelimit supports TLS connections and authentication. These can be conf 1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"username:password"` to enable username-password authentication to the redis host. 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys +For controlling the behavior of cache key incrementation when any of them is already over the limit, you can use the following configuration: + +1. `STOP_CACHE_KEY_INCREMENT_WHEN_OVERLIMIT`: Set this configuration to `true` to disallow key incrementation when one of the keys is already over the limit. + +`STOP_CACHE_KEY_INCREMENT_WHEN_OVERLIMIT` is useful when multiple descriptors are included in a single request. Setting this to `true` can prevent the incrementation of other descriptors' counters if any of the descriptors is already over the limit. + ## Redis type Ratelimit supports different types of redis deployments: diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index 4024cc60..d8a6b7cb 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -71,6 +71,14 @@ func (this *BaseRateLimiter) IsOverLimitWithLocalCache(key string) bool { return false } +func (this *BaseRateLimiter) IsOverLimitThresholdReached(limitInfo *LimitInfo) bool { + limitInfo.overLimitThreshold = limitInfo.limit.Limit.RequestsPerUnit + if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold { + return true + } + return false +} + // Generates response descriptor status based on cache key, over the limit with local cache, over the limit and // near the limit thresholds. Thresholds are checked in order and are mutually exclusive. 
func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo, diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 715e670d..9ad9731e 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -32,5 +32,6 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca s.NearLimitRatio, s.CacheKeyPrefix, statsManager, + s.StopCacheKeyIncrementWhenOverlimit, ) } diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 6f134d53..58e40331 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -27,8 +27,9 @@ type fixedRateLimitCacheImpl struct { // If this client is nil, then the Cache will use the client for all // limits regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. - perSecondClient Client - baseRateLimiter *limiter.BaseRateLimiter + perSecondClient Client + stopCacheKeyIncrementWhenOverlimit bool + baseRateLimiter *limiter.BaseRateLimiter } func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) { @@ -36,6 +37,10 @@ func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend ui *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) } +func pipelineAppendtoGet(client Client, pipeline *Pipeline, key string, result *uint32) { + *pipeline = client.PipeAppend(*pipeline, result, "GET", key) +} + func (this *fixedRateLimitCacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, @@ -51,31 +56,97 @@ func (this *fixedRateLimitCacheImpl) DoLimit( isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) results := make([]uint32, len(request.Descriptors)) - var pipeline, perSecondPipeline Pipeline + currentCount := make([]uint32, len(request.Descriptors)) + var pipeline, perSecondPipeline, pipelineToGet, perSecondPipelineToGet Pipeline + + hitsAddendForRedis := hitsAddend + overlimitIndexes := make([]bool, len(request.Descriptors)) + nearlimitIndexes := make([]bool, len(request.Descriptors)) + isCacheKeyOverlimit := false + + if this.stopCacheKeyIncrementWhenOverlimit { + // Check if any of the keys are reaching to the over limit in redis cache. + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + continue + } - hitAddendForRedis := hitsAddend - overlimitIndex := -1 - // Now, actually setup the pipeline, skipping empty cache keys. - for i, cacheKey := range cacheKeys { - if cacheKey.Key == "" { - continue + // Check if key is over the limit in local cache. + if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { + if limits[i].ShadowMode { + logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) + } else { + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + } + isOverLimitWithLocalCache[i] = true + hitsAddendForRedis = 0 + overlimitIndexes[i] = true + isCacheKeyOverlimit = true + continue + } else { + if this.perSecondClient != nil && cacheKey.PerSecond { + if perSecondPipelineToGet == nil { + perSecondPipelineToGet = Pipeline{} + } + pipelineAppendtoGet(this.perSecondClient, &perSecondPipelineToGet, cacheKey.Key, ¤tCount[i]) + } else { + if pipelineToGet == nil { + pipelineToGet = Pipeline{} + } + pipelineAppendtoGet(this.client, &pipelineToGet, cacheKey.Key, ¤tCount[i]) + } + } } - // Check if key is over the limit in local cache. 
- if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { - if limits[i].ShadowMode { - logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) - } else { - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + // Only if none of the cache keys are over the limit, call Redis to check whether cache keys are getting overlimited. + if len(cacheKeys) > 1 && !isCacheKeyOverlimit { + if pipelineToGet != nil { + checkError(this.client.PipeDo(pipelineToGet)) + } + if perSecondPipelineToGet != nil { + checkError(this.perSecondClient.PipeDo(perSecondPipelineToGet)) + } + + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + continue + } + // Now fetch the pipeline. + limitBeforeIncrease := currentCount[i] + limitAfterIncrease := limitBeforeIncrease + hitsAddend + + limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) + + if this.baseRateLimiter.IsOverLimitThresholdReached(limitInfo) { + hitsAddendForRedis = 0 + nearlimitIndexes[i] = true + } + } + } + } else { + // Check if any of the keys are reaching to the over limit in redis cache. + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + continue + } + + // Check if key is over the limit in local cache. + if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { + if limits[i].ShadowMode { + logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) + } else { + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + } + isOverLimitWithLocalCache[i] = true + overlimitIndexes[i] = true + continue } - isOverLimitWithLocalCache[i] = true - hitAddendForRedis = 0 - overlimitIndex = i - continue } } + + // Now, actually setup the pipeline, skipping empty cache keys. 
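+ // Keys flagged in nearlimitIndexes (an increment would cross the threshold)
+ // still receive the full hitsAddend so the crossing is recorded in Redis;
+ // every other key receives hitsAddendForRedis, which is zeroed as soon as
+ // any key is found over (or about to go over) its limit.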
for i, cacheKey := range cacheKeys { - if cacheKey.Key == "" || overlimitIndex == i { + if cacheKey.Key == "" || overlimitIndexes[i] { continue } @@ -91,12 +162,20 @@ func (this *fixedRateLimitCacheImpl) DoLimit( if perSecondPipeline == nil { perSecondPipeline = Pipeline{} } - pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitAddendForRedis, &results[i], expirationSeconds) + if nearlimitIndexes[i] { + pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + } else { + pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddendForRedis, &results[i], expirationSeconds) + } } else { if pipeline == nil { pipeline = Pipeline{} } - pipelineAppend(this.client, &pipeline, cacheKey.Key, hitAddendForRedis, &results[i], expirationSeconds) + if nearlimitIndexes[i] { + pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + } else { + pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddendForRedis, &results[i], expirationSeconds) + } } } @@ -138,10 +217,12 @@ func (this *fixedRateLimitCacheImpl) DoLimit( func (this *fixedRateLimitCacheImpl) Flush() {} func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource, - jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager) limiter.RateLimitCache { + jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager, + stopCacheKeyIncrementWhenOverlimit bool) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ - client: client, - perSecondClient: perSecondClient, - baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, statsManager), + client: client, + perSecondClient: perSecondClient, + stopCacheKeyIncrementWhenOverlimit: stopCacheKeyIncrementWhenOverlimit, + baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio, cacheKeyPrefix, statsManager), } } diff --git a/src/settings/settings.go b/src/settings/settings.go index 138d73e9..09704781 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -83,11 +83,12 @@ type Settings struct { RuntimeWatchRoot bool `envconfig:"RUNTIME_WATCH_ROOT" default:"true"` // Settings for all cache types - ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"` - LocalCacheSizeInBytes int `envconfig:"LOCAL_CACHE_SIZE_IN_BYTES" default:"0"` - NearLimitRatio float32 `envconfig:"NEAR_LIMIT_RATIO" default:"0.8"` - CacheKeyPrefix string `envconfig:"CACHE_KEY_PREFIX" default:""` - BackendType string `envconfig:"BACKEND_TYPE" default:"redis"` + ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"` + LocalCacheSizeInBytes int `envconfig:"LOCAL_CACHE_SIZE_IN_BYTES" default:"0"` + NearLimitRatio float32 `envconfig:"NEAR_LIMIT_RATIO" default:"0.8"` + CacheKeyPrefix string `envconfig:"CACHE_KEY_PREFIX" default:""` + BackendType string `envconfig:"BACKEND_TYPE" default:"redis"` + StopCacheKeyIncrementWhenOverlimit bool `envconfig:"STOP_CACHE_KEY_INCREMENT_WHEN_OVERLIMIT" default:"false"` // Settings for optional returning of custom headers RateLimitResponseHeadersEnabled bool 
`envconfig:"LIMIT_RESPONSE_HEADERS_ENABLED" default:"false"` diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index c951d1ab..7239e7c3 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -81,6 +81,13 @@ func makeSimpleRedisSettings(redisPort int, perSecondPort int, perSecond bool, l return s } +func makeSimpleRedisSettingsWithStopCacheKeyIncrementWhenOverlimit(redisPort int, perSecondPort int, perSecond bool, localCacheSize int) settings.Settings { + s := makeSimpleRedisSettings(redisPort, perSecondPort, perSecond, localCacheSize) + + s.StopCacheKeyIncrementWhenOverlimit = true + return s +} + func TestBasicConfig(t *testing.T) { common.WithMultiRedis(t, []common.RedisConfig{ {Port: 6383}, @@ -93,6 +100,10 @@ func TestBasicConfig(t *testing.T) { cacheSettings := makeSimpleRedisSettings(6383, 6380, false, 0) cacheSettings.CacheKeyPrefix = "prefix:" t.Run("WithoutPerSecondRedisWithCachePrefix", testBasicConfig(cacheSettings)) + t.Run("WithoutPerSecondRedisWithstopCacheKeyIncrementWhenOverlimitConfig", testBasicConfig(makeSimpleRedisSettingsWithStopCacheKeyIncrementWhenOverlimit(6383, 6380, false, 0))) + t.Run("WithPerSecondRedisWithstopCacheKeyIncrementWhenOverlimitConfig", testBasicConfig(makeSimpleRedisSettingsWithStopCacheKeyIncrementWhenOverlimit(6383, 6380, true, 0))) + t.Run("WithoutPerSecondRedisWithLocalCacheAndstopCacheKeyIncrementWhenOverlimitConfig", testBasicConfig(makeSimpleRedisSettingsWithStopCacheKeyIncrementWhenOverlimit(6383, 6380, false, 1000))) + t.Run("WithPerSecondRedisWithLocalCacheAndstopCacheKeyIncrementWhenOverlimitConfig", testBasicConfig(makeSimpleRedisSettingsWithStopCacheKeyIncrementWhenOverlimit(6383, 6380, true, 1000))) }) } @@ -594,8 +605,8 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) { limitRemaining2 = 0 // Ceased incrementing cached keys upon exceeding the overall rate limit in the Redis cache flow. // Consequently, the remaining limit should remain unaltered. 
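+ // With STOP_CACHE_KEY_INCREMENT_WHEN_OVERLIMIT enabled the first key is
+ // never incremented once another key is over the limit, leaving one more
+ // unit remaining than under the previous behavior (10 rather than 9).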
- if enable_local_cache && s.BackendType != "memcache" { - limitRemaining1 = 9 + if s.StopCacheKeyIncrementWhenOverlimit && s.BackendType != "memcache" { + limitRemaining1 = 10 } } durRemaining1 := response.GetStatuses()[0].DurationUntilReset diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index bc8a9eee..b0b2a215 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -47,7 +47,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { client := redis.NewClientImpl(statsStore, false, "", "tcp", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit, nil, false, nil) defer client.Close() - cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm, true) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index cbbf33b1..8c7f3c47 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -51,9 +51,9 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) var cache limiter.RateLimitCache if usePerSecondRedis { - cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm) + cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm, false) } else { - cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm) + cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm, false) } timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) @@ -190,7 +190,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { localCache := freecache.NewCache(100) statsStore := gostats.NewStore(gostats.NewNullSink(), false) sm := stats.NewMockStatManager(statsStore) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm, false) sink := &common.TestStatSink{} localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) @@ -292,7 +292,7 @@ func TestNearLimit(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) statsStore := gostats.NewStore(gostats.NewNullSink(), false) sm := stats.NewMockStatManager(statsStore) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm, false) // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) @@ -466,7 +466,7 @@ func TestRedisWithJitter(t *testing.T) { jitterSource := mock_utils.NewMockJitterRandSource(controller) statsStore := gostats.NewStore(gostats.NewNullSink(), false) sm := stats.NewMockStatManager(statsStore) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm, false) timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) jitterSource.EXPECT().Int63().Return(int64(100)) @@ -496,7 +496,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { localCache := freecache.NewCache(100) statsStore := gostats.NewStore(gostats.NewNullSink(), false) sm := stats.NewMockStatManager(statsStore) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm, false) sink := &common.TestStatSink{} localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) @@ -609,7 +609,7 @@ func TestRedisTracer(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "", sm, false) timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) @@ -626,3 +626,123 @@ func TestRedisTracer(t *testing.T) { assert.Len(spanStubs, 1) assert.Equal(spanStubs[0].Name, "Redis Pipeline Execution") } + +func TestOverLimitWithStopCacheKeyIncrementWhenOverlimitConfig(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + client := mock_redis.NewMockClient(controller) + timeSource := mock_utils.NewMockTimeSource(controller) + localCache := freecache.NewCache(100) + statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sm := stats.NewMockStatManager(statsStore) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm, true) + sink := &common.TestStatSink{} + localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + + // Test Near Limit Stats. 
Under Near Limit Ratio + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key5_value5_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil).Times(2) + + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}, {{"key5", "value5"}}}, 1) + + limits := []*config.RateLimit{ + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), + config.NewRateLimit(14, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key5_value5"), false, false, "", nil, false), + } + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 3, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, + }, + cache.DoLimit(context.Background(), request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(1), limits[1].Stats.WithinLimit.Value()) + + // Check the local cache stats. + testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) + + // Test Near Limit Stats. 
At Near Limit Ratio, still OK + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key5_value5_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil).Times(2) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, + }, + cache.DoLimit(context.Background(), request, limits)) + assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(2), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(2), limits[1].Stats.WithinLimit.Value()) + + // Check the local cache stats. 
+ testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) + + // Test one key is reaching to the Overlimit threshold + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint32(14)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint32(14)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(0)).SetArg(1, uint32(14)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint32(1)).SetArg(1, uint32(14)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key5_value5_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil).Times(2) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, + }, + cache.DoLimit(context.Background(), request, limits)) + assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(2), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(3), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(3), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(2), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(3), limits[1].Stats.WithinLimit.Value()) + + // Check the local cache stats. 
+ testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) +} From 33c8573d9d1c9f4615bbfffe3d59ced48f42fc3e Mon Sep 17 00:00:00 2001 From: Paul Salaberria Date: Thu, 14 Sep 2023 04:32:39 +0200 Subject: [PATCH 073/181] Update alpine version to fix CVEs (#445) Signed-off-by: Paul Salaberria --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index c4d4c85a..3d4a7739 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.18.2 AS final +FROM alpine:3.18.3 AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit From 624a5893a5874bfad90e5c91b61352354a609e8c Mon Sep 17 00:00:00 2001 From: Chashika Weerathunga Date: Sun, 17 Sep 2023 01:52:52 +0530 Subject: [PATCH 074/181] Modify the comment (#447) Signed-off-by: chashikajw --- src/redis/fixed_cache_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 58e40331..4ec34b3d 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -98,7 +98,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( } } - // Only if none of the cache keys are over the limit, call Redis to check whether cache keys are getting overlimited. + // Only if none of the cache keys exceed the limit, call Redis to check whether the cache keys are becoming overlimited. if len(cacheKeys) > 1 && !isCacheKeyOverlimit { if pipelineToGet != nil { checkError(this.client.PipeDo(pipelineToGet)) From a34dbf9eaf805817e3fc186b452d4717f74ef13c Mon Sep 17 00:00:00 2001 From: Vito Sabella Date: Fri, 22 Sep 2023 05:00:13 +0700 Subject: [PATCH 075/181] fix: Update golang 1.20.8 and go x/net to 0.15 for CVE-2023-3978 (#448) Signed-off-by: Vito Sabella --- Dockerfile | 2 +- go.mod | 6 +++--- go.sum | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Dockerfile b/Dockerfile index 3d4a7739..69a56965 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.7 AS build +FROM golang:1.20.8 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/go.mod b/go.mod index 825101d6..4d2a8459 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.6.0 github.com/stretchr/testify v1.8.1 - golang.org/x/net v0.10.0 + golang.org/x/net v0.15.0 google.golang.org/grpc v1.52.0 google.golang.org/protobuf v1.28.1 gopkg.in/yaml.v2 v2.3.0 @@ -56,6 +56,6 @@ require ( go.opentelemetry.io/otel/sdk v1.7.0 go.opentelemetry.io/otel/trace v1.7.0 go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/sys v0.12.0 // indirect + golang.org/x/text v0.13.0 // indirect ) diff --git a/go.sum b/go.sum index 81fef458..4749db10 100644 --- a/go.sum +++ b/go.sum @@ -321,8 +321,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.10.0 
h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -373,8 +373,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -382,8 +382,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From b9796237b9cb028d90424fd0859e10133c510a29 Mon Sep 17 00:00:00 2001 From: Charlie <104772503+ChuckCrawford@users.noreply.github.com> Date: Mon, 9 Oct 2023 13:54:17 -0400 Subject: [PATCH 076/181] Lint Cleanup (#452) * Resolves most go staticcheck errors. * Replaces a few deprecated packages / functions. * Makes some minor changes to error responses on the /json endpoint. 
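One API detail behind the /json handler changes in this patch: the deprecated jsonpb.Unmarshal consumed the request body as an io.Reader, whereas protojson.Unmarshal operates on a byte slice, so the handler now reads the body first. A condensed sketch of the new flow (simplified from the server_impl.go hunk below; tracing and the response path are omitted):

```
// Old: err := jsonpb.Unmarshal(request.Body, &req)   // took an io.Reader
// New: read the raw bytes, then unmarshal.
body, err := io.ReadAll(request.Body)
if err != nil {
	writeHttpStatus(writer, http.StatusBadRequest) // helper introduced by this patch
	return
}
var req pb.RateLimitRequest
if err := protojson.Unmarshal(body, &req); err != nil {
	writeHttpStatus(writer, http.StatusBadRequest)
	return
}
```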
Signed-off-by: Charlie Crawford --- src/client_cmd/main.go | 8 +- src/config/config_impl.go | 4 +- src/config_check_cmd/main.go | 5 +- src/limiter/base_limiter.go | 5 +- src/redis/cache_impl.go | 4 +- src/redis/driver_impl.go | 2 +- src/server/health.go | 2 +- src/server/server_impl.go | 35 ++++++--- src/stats/manager.go | 3 +- test/common/common.go | 8 +- test/config/config_test.go | 77 ++++++++++--------- .../memcached/stats_collecting_client_test.go | 4 +- test/server/health_test.go | 22 +++--- test/server/server_impl_test.go | 14 ++-- 14 files changed, 100 insertions(+), 93 deletions(-) diff --git a/src/client_cmd/main.go b/src/client_cmd/main.go index a3750f99..63e87b8f 100644 --- a/src/client_cmd/main.go +++ b/src/client_cmd/main.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "github.com/envoyproxy/ratelimit/src/utils" @@ -95,7 +96,7 @@ func main() { tlsConfig := utils.TlsConfigFromFiles(*grpcTlsCertFile, *grpcTlsKeyFile, *grpcServerTlsCACert, utils.ServerCA, false) dialOptions = append(dialOptions, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) } else { - dialOptions = append(dialOptions, grpc.WithInsecure()) + dialOptions = append(dialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) } conn, err := grpc.Dial(*dialString, dialOptions...) if err != nil { @@ -106,9 +107,8 @@ func main() { defer conn.Close() c := pb.NewRateLimitServiceClient(conn) desc := make([]*pb_struct.RateLimitDescriptor, len(descriptorsValue.descriptors)) - for i, v := range descriptorsValue.descriptors { - desc[i] = v - } + copy(desc, descriptorsValue.descriptors) + response, err := c.ShouldRateLimit( context.Background(), &pb.RateLimitRequest{ diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 1a537951..2bdbf9d4 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -151,7 +151,7 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p if validUnit { panic(newRateLimitConfigError( config.Name, - fmt.Sprintf("should not specify rate limit unit when unlimited"))) + "should not specify rate limit unit when unlimited")) } } else if !validUnit { panic(newRateLimitConfigError( @@ -234,7 +234,7 @@ func validateYamlKeys(fileName string, config_map map[interface{}]interface{}) { // the yaml's keys we don't panic here. 
case nil: default: - errorText := fmt.Sprintf("error checking config") + errorText := "error checking config" logger.Debugf(errorText) panic(newRateLimitConfigError(fileName, errorText)) } diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index dc313c31..8f83a724 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -3,7 +3,6 @@ package main import ( "flag" "fmt" - "io/ioutil" "os" "path/filepath" @@ -36,7 +35,7 @@ func main() { fmt.Printf("checking rate limit configs...\n") fmt.Printf("loading config directory: %s\n", *configDirectory) - files, err := ioutil.ReadDir(*configDirectory) + files, err := os.ReadDir(*configDirectory) if err != nil { fmt.Printf("error opening directory %s: %s\n", *configDirectory, err.Error()) os.Exit(1) @@ -46,7 +45,7 @@ func main() { for _, file := range files { finalPath := filepath.Join(*configDirectory, file.Name()) fmt.Printf("opening config file: %s\n", finalPath) - bytes, err := ioutil.ReadFile(finalPath) + bytes, err := os.ReadFile(finalPath) if err != nil { fmt.Printf("error reading file %s: %s\n", finalPath, err.Error()) os.Exit(1) diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index d8a6b7cb..b76366cf 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -73,10 +73,7 @@ func (this *BaseRateLimiter) IsOverLimitWithLocalCache(key string) bool { func (this *BaseRateLimiter) IsOverLimitThresholdReached(limitInfo *LimitInfo) bool { limitInfo.overLimitThreshold = limitInfo.limit.Limit.RequestsPerUnit - if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold { - return true - } - return false + return limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold } // Generates response descriptor status based on cache key, over the limit with local cache, over the limit and diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 9ad9731e..0b0a45b4 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -18,8 +18,8 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, s.RedisPerSecondSocketType, s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv) } - var otherPool Client - otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisSocketType, s.RedisType, s.RedisUrl, s.RedisPoolSize, + + otherPool := NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisSocketType, s.RedisType, s.RedisUrl, s.RedisPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv) return NewFixedRateLimitCacheImpl( diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 436458ca..f68addda 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -119,7 +119,7 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisT client, err = poolFunc(redisSocketType, url) case "cluster": urls := strings.Split(url, ",") - if implicitPipelining == false { + if !implicitPipelining { panic(RedisError("Implicit Pipelining must be enabled to work with Redis Cluster Mode. 
Set values for REDIS_PIPELINE_WINDOW or REDIS_PIPELINE_LIMIT to enable implicit pipelining")) } logger.Warnf("Creating cluster with urls %v", urls) diff --git a/src/server/health.go b/src/server/health.go index 9af7d2dc..7a2f9cb7 100644 --- a/src/server/health.go +++ b/src/server/health.go @@ -33,7 +33,7 @@ const ( func areAllComponentsHealthy(healthMap map[string]bool) bool { allComponentsHealthy := true for _, value := range healthMap { - if value == false { + if !value { allComponentsHealthy = false break } diff --git a/src/server/server_impl.go b/src/server/server_impl.go index dbadbe1e..85c636b7 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -1,7 +1,6 @@ package server import ( - "bytes" "context" "expvar" "fmt" @@ -18,13 +17,13 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" + "google.golang.org/protobuf/encoding/protojson" "github.com/envoyproxy/ratelimit/src/provider" "github.com/envoyproxy/ratelimit/src/stats" "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/golang/protobuf/jsonpb" "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" "github.com/lyft/goruntime/loader" @@ -79,24 +78,28 @@ func (server *server) AddDebugHttpEndpoint(path string, help string, handler htt // example usage from cURL with domain "dummy" and descriptor "perday": // echo '{"domain": "dummy", "descriptors": [{"entries": [{"key": "perday"}]}]}' | curl -vvvXPOST --data @/dev/stdin localhost:8080/json func NewJsonHandler(svc pb.RateLimitServiceServer) func(http.ResponseWriter, *http.Request) { - // Default options include enums as strings and no identation. - m := &jsonpb.Marshaler{} - return func(writer http.ResponseWriter, request *http.Request) { var req pb.RateLimitRequest ctx := context.Background() - if err := jsonpb.Unmarshal(request.Body, &req); err != nil { + body, err := io.ReadAll(request.Body) + if err != nil { + logger.Warnf("error: %s", err.Error()) + writeHttpStatus(writer, http.StatusBadRequest) + return + } + + if err := protojson.Unmarshal(body, &req); err != nil { logger.Warnf("error: %s", err.Error()) - http.Error(writer, err.Error(), http.StatusBadRequest) + writeHttpStatus(writer, http.StatusBadRequest) return } resp, err := svc.ShouldRateLimit(ctx, &req) if err != nil { logger.Warnf("error: %s", err.Error()) - http.Error(writer, err.Error(), http.StatusBadRequest) + writeHttpStatus(writer, http.StatusBadRequest) return } @@ -109,12 +112,16 @@ func NewJsonHandler(svc pb.RateLimitServiceServer) func(http.ResponseWriter, *ht defer span.End() logger.Debugf("resp:%s", resp) + if resp == nil { + logger.Error("nil response") + writeHttpStatus(writer, http.StatusInternalServerError) + return + } - buf := bytes.NewBuffer(nil) - err = m.Marshal(buf, resp) + jsonResp, err := protojson.Marshal(resp) if err != nil { logger.Errorf("error marshaling proto3 to json: %s", err.Error()) - http.Error(writer, "error marshaling proto3 to json: "+err.Error(), http.StatusInternalServerError) + writeHttpStatus(writer, http.StatusInternalServerError) return } @@ -124,10 +131,14 @@ func NewJsonHandler(svc pb.RateLimitServiceServer) func(http.ResponseWriter, *ht } else if resp.OverallCode == pb.RateLimitResponse_OVER_LIMIT { writer.WriteHeader(http.StatusTooManyRequests) } - writer.Write(buf.Bytes()) + writer.Write(jsonResp) } } +func writeHttpStatus(writer http.ResponseWriter, code int) { + http.Error(writer, http.StatusText(code), code) +} + func getProviderImpl(s 
settings.Settings, statsManager stats.Manager, rootStore gostats.Store) provider.RateLimitConfigProvider { switch s.ConfigType { case "FILE": diff --git a/src/stats/manager.go b/src/stats/manager.go index cab7dc33..ba83fd51 100644 --- a/src/stats/manager.go +++ b/src/stats/manager.go @@ -2,7 +2,6 @@ package stats import ( gostats "github.com/lyft/gostats" - stats "github.com/lyft/gostats" ) // Manager is the interface that wraps initialization of stat structures. @@ -17,7 +16,7 @@ type Manager interface { // Multiple calls to this method are idempotent. NewServiceStats() ServiceStats // Returns the stats.Store wrapped by the Manager. - GetStatsStore() stats.Store + GetStatsStore() gostats.Store } type ManagerImpl struct { diff --git a/test/common/common.go b/test/common/common.go index 0fec1ae1..1062a899 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -12,8 +12,8 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" @@ -111,7 +111,7 @@ func startCacheProcess(ctx context.Context, command string, args []string, port if err1 != nil || err2 != nil { cancel() - return nil, fmt.Errorf("Problem starting %s subprocess: %v / %v", command, err1, err2) + return nil, fmt.Errorf("problem starting %s subprocess: %v / %v", command, err1, err2) } // You'd think cmd.Stdout = os.Stdout would make more sense here, but @@ -127,13 +127,13 @@ func startCacheProcess(ctx context.Context, command string, args []string, port err := cmd.Start() if err != nil { cancel() - return nil, fmt.Errorf("Problem starting %s subprocess: %v", command, err) + return nil, fmt.Errorf("problem starting %s subprocess: %v", command, err) } err = WaitForTcpPort(ctx, port, 1*time.Second) if err != nil { cancel() - return nil, fmt.Errorf("Timed out waiting for %s to start up and accept connections: %v", command, err) + return nil, fmt.Errorf("timed out waiting for %s to start up and accept connections: %v", command, err) } return func() { diff --git a/test/config/config_test.go b/test/config/config_test.go index ed131110..3ed3984b 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -1,7 +1,8 @@ package config_test import ( - "io/ioutil" + "context" + "os" "testing" "github.com/envoyproxy/ratelimit/test/common" @@ -17,7 +18,7 @@ import ( ) func loadFile(path string) []config.RateLimitConfigToLoad { - contents, err := ioutil.ReadFile(path) + contents, err := os.ReadFile(path) if err != nil { panic(err) } @@ -31,39 +32,39 @@ func TestBasicConfig(t *testing.T) { rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats), false) rlConfig.Dump() assert.Equal(rlConfig.IsEmptyDomains(), false) - assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{})) - assert.Nil(rlConfig.GetLimit(nil, "test-domain", &pb_struct.RateLimitDescriptor{})) + assert.Nil(rlConfig.GetLimit(context.TODO(), "foo_domain", &pb_struct.RateLimitDescriptor{})) + assert.Nil(rlConfig.GetLimit(context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{})) rl := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "something"}}, }) assert.Nil(rl) rl = rlConfig.GetLimit( - nil, "test-domain", + 
context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}}, }) assert.Nil(rl) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "value2"}, {Key: "subkey", Value: "subvalue"}}, }) assert.Nil(rl) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key5", Value: "value5"}, {Key: "subkey5", Value: "subvalue"}}, }) assert.Nil(rl) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something"}}, }) @@ -79,7 +80,7 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1.within_limit").Value()) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "subvalue1"}}, }) @@ -99,7 +100,7 @@ func TestBasicConfig(t *testing.T) { 1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.within_limit").Value()) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "something"}}, }) @@ -115,7 +116,7 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key2.within_limit").Value()) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "value2"}}, }) @@ -131,14 +132,14 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key2_value2.within_limit").Value()) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "value3"}}, }) assert.Nil(rl) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key3", Value: "foo"}}, }) @@ -154,7 +155,7 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key3.within_limit").Value()) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key4", Value: "foo"}}, }) @@ -170,7 +171,7 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key4.within_limit").Value()) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key6", Value: "foo"}}, }) @@ -183,7 +184,7 @@ func TestBasicConfig(t *testing.T) { // A value for the key with detailed_metric: true // should also generate a stat with the value included rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key7", Value: "unspecified_value"}}, }) @@ -201,7 +202,7 @@ func 
TestBasicConfig(t *testing.T) { // Another value for the key with detailed_metric: true // should also generate a stat with the value included rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key7", Value: "another_value"}}, }) @@ -226,11 +227,11 @@ func TestDomainMerge(t *testing.T) { rlConfig := config.NewRateLimitConfigImpl(files, mockstats.NewMockStatManager(stats), true) rlConfig.Dump() - assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{})) - assert.Nil(rlConfig.GetLimit(nil, "test-domain", &pb_struct.RateLimitDescriptor{})) + assert.Nil(rlConfig.GetLimit(context.TODO(), "foo_domain", &pb_struct.RateLimitDescriptor{})) + assert.Nil(rlConfig.GetLimit(context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{})) rl := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}}, }) @@ -238,7 +239,7 @@ func TestDomainMerge(t *testing.T) { assert.EqualValues(10, rl.Limit.RequestsPerUnit) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "value2"}}, }) @@ -252,13 +253,13 @@ func TestConfigLimitOverride(t *testing.T) { rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats), false) rlConfig.Dump() // No matching domain - assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{ + assert.Nil(rlConfig.GetLimit(context.TODO(), "foo_domain", &pb_struct.RateLimitDescriptor{ Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ RequestsPerUnit: 10, Unit: pb_type.RateLimitUnit_DAY, }, })) rl := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something"}}, Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ @@ -281,7 +282,7 @@ func TestConfigLimitOverride(t *testing.T) { // Change in override value doesn't erase stats rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something"}}, Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ @@ -304,7 +305,7 @@ func TestConfigLimitOverride(t *testing.T) { // Different value creates a different counter rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something_else"}}, Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ @@ -498,7 +499,7 @@ func TestShadowModeConfig(t *testing.T) { rlConfig.Dump() rl := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something"}}, }) @@ -515,7 +516,7 @@ func TestShadowModeConfig(t *testing.T) { assert.EqualValues(0, stats.NewCounter("test-domain.key1_value1.subkey1.shadow_mode").Value()) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", 
&pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "subvalue1"}}, }) @@ -533,7 +534,7 @@ func TestShadowModeConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_subvalue1.shadow_mode").Value()) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "something"}}, }) @@ -550,7 +551,7 @@ func TestShadowModeConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key2.shadow_mode").Value()) rl = rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key2", Value: "value2"}}, }) @@ -574,12 +575,12 @@ func TestWildcardConfig(t *testing.T) { // Baseline to show wildcard works like no value withoutVal1 := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "noVal", Value: "foo1"}}, }) withoutVal2 := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "noVal", Value: "foo2"}}, }) @@ -588,17 +589,17 @@ func TestWildcardConfig(t *testing.T) { // Matches multiple wildcard values and results are equal wildcard1 := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "foo1"}}, }) wildcard2 := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "foo2"}}, }) wildcard3 := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "nestedWild", Value: "val1"}, {Key: "wild", Value: "goo2"}}, }) @@ -608,7 +609,7 @@ func TestWildcardConfig(t *testing.T) { // Doesn't match non-matching values noMatch := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "bar"}}, }) @@ -616,7 +617,7 @@ func TestWildcardConfig(t *testing.T) { // Non-wildcard values don't eager match eager := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "noWild", Value: "foo1"}}, }) @@ -624,7 +625,7 @@ func TestWildcardConfig(t *testing.T) { // Wildcard in the middle of value is not supported. 
midWildcard := rlConfig.GetLimit( - nil, "test-domain", + context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{ Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "midWildcard", Value: "barab"}}, }) diff --git a/test/memcached/stats_collecting_client_test.go b/test/memcached/stats_collecting_client_test.go index d2e2b5cd..5d9849ae 100644 --- a/test/memcached/stats_collecting_client_test.go +++ b/test/memcached/stats_collecting_client_test.go @@ -135,7 +135,7 @@ func TestStats_Increment(t *testing.T) { expectedErr := errors.New("expectedError") fakeSink.Reset() client.EXPECT().Increment("foo", uint64(5)).Return(uint64(0), expectedErr) - newValue, err = sc.Increment("foo", 5) + _, err = sc.Increment("foo", 5) statsStore.Flush() assert.Equal(expectedErr, err) @@ -145,7 +145,7 @@ func TestStats_Increment(t *testing.T) { fakeSink.Reset() client.EXPECT().Increment("foo", uint64(5)).Return(uint64(0), memcache.ErrCacheMiss) - newValue, err = sc.Increment("foo", 5) + _, err = sc.Increment("foo", 5) statsStore.Flush() assert.Equal(memcache.ErrCacheMiss, err) diff --git a/test/server/health_test.go b/test/server/health_test.go index fd0de4bd..a2238a5c 100644 --- a/test/server/health_test.go +++ b/test/server/health_test.go @@ -25,11 +25,11 @@ func TestHealthCheck(t *testing.T) { r, _ := http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) hc.ServeHTTP(recorder, r) - if 200 != recorder.Code { + if recorder.Code != 200 { t.Errorf("expected code 200 actual %d", recorder.Code) } - if "OK" != recorder.Body.String() { + if recorder.Body.String() != "OK" { t.Errorf("expected body 'OK', got '%s'", recorder.Body.String()) } @@ -43,7 +43,7 @@ func TestHealthCheck(t *testing.T) { r, _ = http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) hc.ServeHTTP(recorder, r) - if 500 != recorder.Code { + if recorder.Code != 500 { t.Errorf("expected code 500 actual %d", recorder.Code) } @@ -57,11 +57,11 @@ func TestHealthCheck(t *testing.T) { r, _ = http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) hc.ServeHTTP(recorder, r) - if 200 != recorder.Code { + if recorder.Code != 200 { t.Errorf("expected code 200 actual %d", recorder.Code) } - if "OK" != recorder.Body.String() { + if recorder.Body.String() != "OK" { t.Errorf("expected body 'OK', got '%s'", recorder.Body.String()) } } @@ -76,7 +76,7 @@ func TestHealthyWithAtLeastOneConfigLoaded(t *testing.T) { r, _ := http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) hc.ServeHTTP(recorder, r) - if 500 != recorder.Code { + if recorder.Code != 500 { t.Errorf("expected code 500 actual %d", recorder.Code) } @@ -90,11 +90,11 @@ func TestHealthyWithAtLeastOneConfigLoaded(t *testing.T) { r, _ = http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) hc.ServeHTTP(recorder, r) - if 200 != recorder.Code { + if recorder.Code != 200 { t.Errorf("expected code 200 actual %d", recorder.Code) } - if "OK" != recorder.Body.String() { + if recorder.Body.String() != "OK" { t.Errorf("expected body 'OK', got '%s'", recorder.Body.String()) } @@ -108,7 +108,7 @@ func TestHealthyWithAtLeastOneConfigLoaded(t *testing.T) { r, _ = http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) hc.ServeHTTP(recorder, r) - if 500 != recorder.Code { + if recorder.Code != 500 { t.Errorf("expected code 500 actual %d", recorder.Code) } @@ -122,11 +122,11 @@ func TestHealthyWithAtLeastOneConfigLoaded(t *testing.T) { r, _ = http.NewRequest("GET", "http://1.2.3.4/healthcheck", nil) hc.ServeHTTP(recorder, r) - if 200 != recorder.Code { + if recorder.Code != 200 { t.Errorf("expected 
code 200 actual %d", recorder.Code) } - if "OK" != recorder.Body.String() { + if recorder.Body.String() != "OK" { t.Errorf("expected body 'OK', got '%s'", recorder.Body.String()) } } diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go index 235ba2a5..9896b798 100644 --- a/test/server/server_impl_test.go +++ b/test/server/server_impl_test.go @@ -3,14 +3,14 @@ package server_test import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" "testing" - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/proto" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" @@ -36,7 +36,7 @@ func assertHttpResponse(t *testing.T, handler(w, req) resp := w.Result() - actualBody, _ := ioutil.ReadAll(resp.Body) + actualBody, _ := io.ReadAll(resp.Body) assert.Equal(expectedContentType, resp.Header.Get("Content-Type")) assert.Equal(expectedStatusCode, resp.StatusCode) assert.Equal(expectedResponseBody, string(actualBody)) @@ -55,10 +55,10 @@ func TestJsonHandler(t *testing.T) { }) // Missing request body - assertHttpResponse(t, handler, "", 400, "text/plain; charset=utf-8", "EOF\n") + assertHttpResponse(t, handler, "", 400, "text/plain; charset=utf-8", "Bad Request\n") // Request body is not valid json - assertHttpResponse(t, handler, "}", 400, "text/plain; charset=utf-8", "invalid character '}' looking for beginning of value\n") + assertHttpResponse(t, handler, "}", 400, "text/plain; charset=utf-8", "Bad Request\n") // Unknown response code rls.EXPECT().ShouldRateLimit(context.Background(), requestMatcher).Return(&pb.RateLimitResponse{}, nil) @@ -66,11 +66,11 @@ func TestJsonHandler(t *testing.T) { // ratelimit service error rls.EXPECT().ShouldRateLimit(context.Background(), requestMatcher).Return(nil, fmt.Errorf("some error")) - assertHttpResponse(t, handler, `{"domain": "foo"}`, 400, "text/plain; charset=utf-8", "some error\n") + assertHttpResponse(t, handler, `{"domain": "foo"}`, 400, "text/plain; charset=utf-8", "Bad Request\n") // json unmarshaling error rls.EXPECT().ShouldRateLimit(context.Background(), requestMatcher).Return(nil, nil) - assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "text/plain; charset=utf-8", "error marshaling proto3 to json: Marshal called with nil\n") + assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "text/plain; charset=utf-8", "Internal Server Error\n") // successful request, not rate limited rls.EXPECT().ShouldRateLimit(context.Background(), requestMatcher).Return(&pb.RateLimitResponse{ From 62bd52d0ca7572d778dad7edf8858a7d91832732 Mon Sep 17 00:00:00 2001 From: Charlie <104772503+ChuckCrawford@users.noreply.github.com> Date: Mon, 6 Nov 2023 12:43:00 -0500 Subject: [PATCH 077/181] Updates Golang and deps with CVEs (#454) Signed-off-by: Charlie Crawford --- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- CONTRIBUTING.md | 2 +- Dockerfile | 2 +- Dockerfile.integration | 2 +- README.md | 18 +++++++++--------- docker-compose.yml | 4 ++-- examples/xds-sotw-config-server/go.mod | 4 ++-- go.mod | 6 +++--- go.sum | 8 ++++---- 10 files changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index ee51db57..0ff131d6 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -46,7 +46,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "1.20" + go-version: "1.21.3" - name: run pre-commits run: | diff --git a/.github/workflows/pullrequest.yaml 
b/.github/workflows/pullrequest.yaml index 93d9dbf7..90477bd2 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -34,7 +34,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "1.20" + go-version: "1.21.3" - name: run pre-commits run: | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9d43e10f..8138ed4c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,7 +7,7 @@ We welcome contributions from the community. Here are some guidelines. # Submitting a PR - Fork the repo. -- Before commiting any code, install the pre-commits by: +- Before committing any code, install the pre-commits by: ```bash make precommit_install diff --git a/Dockerfile b/Dockerfile index 69a56965..71b450e8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.8 AS build +FROM golang:1.21.3 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index 821f1c46..6ab99744 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang:1.20 +FROM golang:1.21.3 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* diff --git a/README.md b/README.md index cf2bf649..4823013f 100644 --- a/README.md +++ b/README.md @@ -1003,15 +1003,6 @@ The following environment variables control the custom response feature: 1. `LIMIT_REMAINING_HEADER` - The default value is "RateLimit-Remaining", setting the environment variable will specify an alternative header name 1. `LIMIT_RESET_HEADER` - The default value is "RateLimit-Reset", setting the environment variable will specify an alternative header name -You may use the following commands to quickly setup a openTelemetry collector together with a Jaeger all-in-one binary for quickstart: - -```bash -docker run --name otlp -d -p 4318 -p 4317 -v examples/otlp-collector:/tmp/otlp-collector otel/opentelemetry-collector:0.48.0 -- --config /tmp/otlp-collector/config.yaml -otelcol-contrib --config examples/otlp-collector/config.yaml - -docker run -d --name jaeger -p 16686:16686 -p 14250:14250 jaegertracing/all-in-one:1.33 -``` - # Tracing Ratelimit service supports exporting spans in OLTP format. See [OpenTelemetry](https://opentelemetry.io/) for more information. @@ -1026,6 +1017,15 @@ The following environment variables control the tracing feature: 1. Other fields in [OTLP Exporter Documentation](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md). These section needs to be correctly configured in order to enable the exporter to export span to the correct destination. 1. `TRACING_SAMPLING_RATE` - Controls the sampling rate, defaults to 1 which means always sample. Valid range: 0.0-1.0. For high volume services, adjusting the sampling rate is recommended. +You may use the following commands to quickly setup a openTelemetry collector together with a Jaeger all-in-one binary for quickstart: + +```bash +docker run --name otlp -d -p 4318 -p 4317 -v examples/otlp-collector:/tmp/otlp-collector otel/opentelemetry-collector:0.48.0 -- --config /tmp/otlp-collector/config.yaml +otelcol-contrib --config examples/otlp-collector/config.yaml + +docker run -d --name jaeger -p 16686:16686 -p 14250:14250 jaegertracing/all-in-one:1.33 +``` + # mTLS Ratelimit supports mTLS when Envoy sends requests to the service. 
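The Tracing portion of the README above leaves the Go side implicit. As a minimal sketch of how a ratio-based sampler pairs with the OTLP exporter (illustrative only, not the service's actual wiring; the 0.1 ratio and the standalone `main` are assumptions for the example):

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// The OTLP gRPC exporter reads the standard OTEL_EXPORTER_OTLP_*
	// environment variables for its endpoint, headers, and TLS settings.
	exporter, err := otlptracegrpc.New(ctx)
	if err != nil {
		log.Fatalf("creating OTLP trace exporter: %v", err)
	}

	// A ParentBased(TraceIDRatioBased) sampler is the SDK-level analogue of
	// a TRACING_SAMPLING_RATE of 0.1: roughly 10% of root traces are kept.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exporter),
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.1))),
	)
	defer func() { _ = tp.Shutdown(ctx) }()

	otel.SetTracerProvider(tp)
}
```

With the quickstart collector above running, pointing OTEL_EXPORTER_OTLP_ENDPOINT at it should be enough for spans to reach Jaeger.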
diff --git a/docker-compose.yml b/docker-compose.yml index bd5807ce..01a08b08 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,7 +20,7 @@ services: # minimal container that builds the ratelimit service binary and exits. ratelimit-build: - image: golang:1.20-alpine + image: golang:1.21.3-alpine working_dir: /go/src/github.com/envoyproxy/ratelimit command: go build -o /usr/local/bin/ratelimit ./src/service_cmd/main.go volumes: @@ -28,7 +28,7 @@ services: - binary:/usr/local/bin/ ratelimit-client-build: - image: golang:1.20-alpine + image: golang:1.21.3-alpine working_dir: /go/src/github.com/envoyproxy/ratelimit command: go build -o /usr/local/bin/ratelimit_client ./src/client_cmd/main.go volumes: diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index 218bb048..4ab69e0e 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -1,6 +1,6 @@ module github.com/envoyproxy/ratelimit/examples/xds-sotw-config-server -go 1.20 +go 1.21.3 require ( github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f @@ -14,7 +14,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect github.com/sirupsen/logrus v1.6.0 // indirect - golang.org/x/net v0.7.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.5.0 // indirect golang.org/x/text v0.7.0 // indirect google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect diff --git a/go.mod b/go.mod index 4d2a8459..593fae9e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/envoyproxy/ratelimit -go 1.20 +go 1.21.3 require ( github.com/alicebob/miniredis/v2 v2.23.0 @@ -18,7 +18,7 @@ require ( github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.6.0 github.com/stretchr/testify v1.8.1 - golang.org/x/net v0.15.0 + golang.org/x/net v0.17.0 google.golang.org/grpc v1.52.0 google.golang.org/protobuf v1.28.1 gopkg.in/yaml.v2 v2.3.0 @@ -56,6 +56,6 @@ require ( go.opentelemetry.io/otel/sdk v1.7.0 go.opentelemetry.io/otel/trace v1.7.0 go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/sys v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect ) diff --git a/go.sum b/go.sum index 4749db10..8a617e55 100644 --- a/go.sum +++ b/go.sum @@ -321,8 +321,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -373,8 +373,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 5e1be594aed24ce8b9cdbd37400a348d8eaba2e1 Mon Sep 17 00:00:00 2001 From: Dmitriy Zakomirnyi Date: Fri, 10 Nov 2023 18:23:57 +0200 Subject: [PATCH 078/181] fix: update xds-sotw-config-server example dockerfile golang version (#455) This updates golang version in example xds-sotw-config-server Dockerfile in order to fix docker image build. Signed-off-by: Dmitriy Zakomirnyi --- examples/xds-sotw-config-server/Dockerfile | 2 +- examples/xds-sotw-config-server/go.mod | 6 ++--- examples/xds-sotw-config-server/go.sum | 27 ++++++++-------------- 3 files changed, 12 insertions(+), 23 deletions(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index 52439213..cd804b27 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18 AS build +FROM golang:1.21.3 AS build WORKDIR /xds-server COPY . . 
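The single-line Dockerfile change above is the whole fix: the previous patch moved this example's go.mod to `go 1.21.3`, and pre-1.21 toolchains such as the old golang:1.18 builder reject a three-part version in the `go` directive (typically with an "invalid go version" parse error), so the image build broke before compiling anything. A minimal sketch of a compatible build stage follows; the `RUN` line and binary path are illustrative assumptions, not the example's actual Dockerfile tail:

```dockerfile
# The builder toolchain must be at least as new as the `go` directive in
# go.mod; golang:1.18 cannot even parse `go 1.21.3`.
FROM golang:1.21.3 AS build
WORKDIR /xds-server
COPY . .
# Hypothetical build step: compile the example server from the module root.
RUN go build -o /usr/local/bin/xds-server .
```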
diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index 4ab69e0e..85ccd78a 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -12,11 +12,9 @@ require ( github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect - github.com/sirupsen/logrus v1.6.0 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum index 7382fd4f..cd38974d 100644 --- a/examples/xds-sotw-config-server/go.sum +++ b/examples/xds-sotw-config-server/go.sum @@ -24,15 +24,12 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -42,26 +39,19 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.17.0 
h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -83,5 +73,6 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From b095d15981177c44151b7737354e42f49fd07cd0 Mon Sep 17 00:00:00 2001 From: Charlie <104772503+ChuckCrawford@users.noreply.github.com> Date: Wed, 6 Dec 2023 10:45:02 -0500 Subject: [PATCH 079/181] Dependency Updates for CVE Resolutions (#463) Signed-off-by: Charlie Crawford --- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- Dockerfile | 2 +- Dockerfile.integration | 2 +- docker-compose.yml | 4 +- examples/xds-sotw-config-server/Dockerfile | 2 +- examples/xds-sotw-config-server/go.mod | 18 +- examples/xds-sotw-config-server/go.sum | 36 +- go.mod | 92 ++-- go.sum | 535 +++++---------------- 10 files changed, 205 insertions(+), 490 deletions(-) diff --git 
a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 0ff131d6..ab692ae6 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -46,7 +46,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "1.21.3" + go-version: "1.21.5" - name: run pre-commits run: | diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 90477bd2..f3b17246 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -34,7 +34,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "1.21.3" + go-version: "1.21.5" - name: run pre-commits run: | diff --git a/Dockerfile b/Dockerfile index 71b450e8..03acaed1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.3 AS build +FROM golang:1.21.5 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index 6ab99744..13c0f957 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang:1.21.3 +FROM golang:1.21.5 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* diff --git a/docker-compose.yml b/docker-compose.yml index 01a08b08..6f1b9f0f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,7 +20,7 @@ services: # minimal container that builds the ratelimit service binary and exits. ratelimit-build: - image: golang:1.21.3-alpine + image: golang:1.21.5-alpine working_dir: /go/src/github.com/envoyproxy/ratelimit command: go build -o /usr/local/bin/ratelimit ./src/service_cmd/main.go volumes: @@ -28,7 +28,7 @@ services: - binary:/usr/local/bin/ ratelimit-client-build: - image: golang:1.21.3-alpine + image: golang:1.21.5-alpine working_dir: /go/src/github.com/envoyproxy/ratelimit command: go build -o /usr/local/bin/ratelimit_client ./src/client_cmd/main.go volumes: diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index cd804b27..a93181d1 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.3 AS build +FROM golang:1.21.5 AS build WORKDIR /xds-server COPY . . 
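The go.mod/go.sum churn below is the typical shape of a CVE-driven refresh. A rough sketch of the workflow behind such a commit (the module versions match this diff, but the exact commands are an assumption, not a record of what was run):

```bash
# Bump modules with known fixes, keep go.mod/go.sum consistent, then
# re-scan the module graph for remaining known vulnerabilities.
go get golang.org/x/net@v0.19.0 google.golang.org/grpc@v1.59.0
go mod tidy
govulncheck ./...   # from golang.org/x/vuln/cmd/govulncheck
```

Running `go mod tidy` after the `go get` is what produces the large go.sum deltas seen here, since transitive requirements move together.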
diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index 85ccd78a..a6162ea0 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -1,20 +1,22 @@ module github.com/envoyproxy/ratelimit/examples/xds-sotw-config-server -go 1.21.3 +go 1.21.5 require ( - github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f - google.golang.org/grpc v1.52.0 + github.com/envoyproxy/go-control-plane v0.11.1 + google.golang.org/grpc v1.59.0 ) require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect - github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/protobuf v1.31.0 // indirect ) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum index cd38974d..b923eb81 100644 --- a/examples/xds-sotw-config-server/go.sum +++ b/examples/xds-sotw-config-server/go.sum @@ -4,23 +4,23 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f h1:nqACgqiYlDnB0znidh+8uhnQVLeqfW5NyyRfnGibowc= -github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= 
-github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= @@ -28,8 +28,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -61,17 +61,21 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod 
h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/go.mod b/go.mod index 593fae9e..5d1c386d 100644 --- a/go.mod +++ b/go.mod @@ -1,61 +1,59 @@ module github.com/envoyproxy/ratelimit -go 1.21.3 +go 1.21.5 require ( - github.com/alicebob/miniredis/v2 v2.23.0 - github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b - github.com/coocood/freecache v1.1.0 - github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f - github.com/golang/mock v1.4.4 - github.com/golang/protobuf v1.5.2 - github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 - github.com/kavu/go_reuseport v1.2.0 + github.com/alicebob/miniredis/v2 v2.31.0 + github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 + github.com/coocood/freecache v1.2.4 + github.com/envoyproxy/go-control-plane v0.11.1 + github.com/golang/mock v1.6.0 + github.com/golang/protobuf v1.5.3 + github.com/google/uuid v1.4.0 + github.com/gorilla/mux v1.8.1 + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 + github.com/kavu/go_reuseport v1.5.0 github.com/kelseyhightower/envconfig v1.4.0 github.com/lyft/goruntime v0.3.0 - github.com/lyft/gostats v0.4.1 + github.com/lyft/gostats v0.4.12 github.com/mediocregopher/radix/v3 v3.8.1 - github.com/sirupsen/logrus v1.6.0 - github.com/stretchr/testify v1.8.1 - golang.org/x/net v0.17.0 - google.golang.org/grpc v1.52.0 - google.golang.org/protobuf v1.28.1 - gopkg.in/yaml.v2 v2.3.0 + github.com/sirupsen/logrus v1.9.3 + github.com/stretchr/testify v1.8.4 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 + go.opentelemetry.io/otel v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 + 
go.opentelemetry.io/otel/sdk v1.21.0 + go.opentelemetry.io/otel/trace v1.21.0 + golang.org/x/net v0.19.0 + google.golang.org/grpc v1.59.0 + google.golang.org/protobuf v1.31.0 + gopkg.in/yaml.v2 v2.4.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -require ( - github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect - github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect - github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/google/uuid v1.3.0 - github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 - go.opentelemetry.io/otel v1.7.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.3 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.3 - go.opentelemetry.io/otel/sdk v1.7.0 - go.opentelemetry.io/otel/trace v1.7.0 - go.opentelemetry.io/proto/otlp v0.19.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + github.com/stretchr/objx v0.5.1 // indirect + github.com/yuin/gopher-lua v1.1.1 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 8a617e55..3da22d74 100644 --- a/go.sum +++ b/go.sum @@ -1,193 +1,102 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= 
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2 
h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= +github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis/v2 v2.23.0 h1:+lwAJYjvvdIVg6doFHuotFjueJ/7KY10xo/vm3X3Scw= -github.com/alicebob/miniredis/v2 v2.23.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0= -github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis/v2 v2.31.0 h1:ObEFUNlJwoIiyjxdrYF0QIDE7qXcLc7D3WpSH4c22PU= +github.com/alicebob/miniredis/v2 v2.31.0/go.mod h1:UB/T2Uztp7MlFSDakaX1sTXUv5CASoprx0wulRT6HBg= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous= +github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA= -github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/coocood/freecache v1.2.4 h1:UdR6Yz/X1HW4fZOuH0Z94KwG851GWOSknua5VUbb/5M= +github.com/coocood/freecache v1.2.4/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f h1:nqACgqiYlDnB0znidh+8uhnQVLeqfW5NyyRfnGibowc= -github.com/envoyproxy/go-control-plane v0.10.3-0.20230127155013-72157d335c8f/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= -github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kavu/go_reuseport v1.2.0 h1:YO+pt6m5Z3WkVH9DjaDJzoSS/0FO2Q8x3CfObxk/i2E= -github.com/kavu/go_reuseport v1.2.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y= +github.com/kavu/go_reuseport v1.5.0 h1:UNuiY2OblcqAtVDE8Gsg1kZz8zbBWg907sP1ceBV+bk= +github.com/kavu/go_reuseport v1.5.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod 
h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lyft/goruntime v0.3.0 h1:VLBYR4s3XazkUT8lLtq9CJrt58YmLQQumrK3ktenEkI= github.com/lyft/goruntime v0.3.0/go.mod h1:BW1gngSpMJR9P9w23BPUPdhdbUWhpirl98TQhOWWMF4= -github.com/lyft/gostats v0.4.1 h1:oR6p4HRCGxt0nUntmZIWmYMgyothBi3eZH2A71vRjsc= github.com/lyft/gostats v0.4.1/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= +github.com/lyft/gostats v0.4.12 h1:vaQMrsY4QH9GOeJUkZ7bHm8kqS92IhHuuwh7vTQ4qyQ= +github.com/lyft/gostats v0.4.12/go.mod h1:rMGud5RRaGYMG0KPS0GAUSBBs69yFMOMYjAnmcPTaG8= github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M= github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -195,357 +104,159 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= +github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= -github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= -go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= -go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= -go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= -go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3 
h1:nAmg1WgsUXoXf46dJG9eS/AzOcvkCTK4xJSUYpWyHYg= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3 h1:4/UjHWMVVc5VwX/KAtqJOHErKigMCH8NexChMuanb/o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3/go.mod h1:UJmXdiVVBaZ63umRUTwJuCMAV//GCMvDiQwn703/GoY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.3 h1:leYDq5psbM3K4QNcZ2juCj30LjUnvxjuYQj1mkGjXFM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.3/go.mod h1:ycItY/esVj8c0dKgYTOztTERXtPzcfDU/0o8EdwCjoA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.3 h1:ufVuVt/g16GZ/yDOyp+AcCGebGX8u4z7kDRuwEX0DkA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.6.3/go.mod h1:S18p8VK4KRHHyAg5rH3iUnJUcRvIUg9xwIWtq1MWibM= -go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= -go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0= -go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= -go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= -go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= -go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 
h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 
h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto 
v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 h1:W12Pwm4urIbRdGhMEg2NM9O3TWKjNcxQhs46V0ypf/k= +google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= +google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 
h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= From 97c2f6f2a5a81f540c38f1385f846f0eaf72edd6 Mon Sep 17 00:00:00 2001 From: rayseaward <84980696+rayseaward@users.noreply.github.com> Date: Mon, 18 Dec 2023 20:02:03 -0400 Subject: [PATCH 080/181] Update alpine version to fix CVEs (#464) fixes CVEs CVE-2023-5678 CVE-2023-5363 as per https://github.com/alpinelinux/docker-alpine/issues/358 Signed-off-by: rayseaward <84980696+rayseaward@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 03acaed1..871426d5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.18.3 
AS final
+FROM alpine:3.18.5 AS final

 RUN apk --no-cache add ca-certificates && apk --no-cache update

 COPY --from=build /go/bin/ratelimit /bin/ratelimit

From 9a1829496598e2cf0907b280685faa9e67402f74 Mon Sep 17 00:00:00 2001
From: Matthieu MOREL
Date: Sat, 6 Jan 2024 21:15:02 +0100
Subject: [PATCH 081/181] setup ossf scorecard and codeql workflows (#466)

* setup ossf scorecard and codeql workflows

Signed-off-by: Matthieu MOREL

* Update scorecard.yml

Signed-off-by: Matthieu MOREL

* Update main.yaml

Signed-off-by: Matthieu MOREL

---------

Signed-off-by: Matthieu MOREL
---
 .github/dependabot.yml                     | 30 ++++++++++
 .github/workflows/codeql.yml               | 48 +++++++++++++++
 .github/workflows/main.yaml                | 17 +++---
 .github/workflows/pullrequest.yaml         | 13 ++--
 .github/workflows/release.yaml             | 11 ++--
 .github/workflows/scorecard.yml            | 69 ++++++++++++++++++++++
 .github/workflows/stale.yml                |  8 ++-
 Dockerfile                                 |  4 +-
 Dockerfile.integration                     |  2 +-
 examples/xds-sotw-config-server/Dockerfile |  4 +-
 integration-test/Dockerfile.tester         |  2 +-
 11 files changed, 185 insertions(+), 23 deletions(-)
 create mode 100644 .github/dependabot.yml
 create mode 100644 .github/workflows/codeql.yml
 create mode 100644 .github/workflows/scorecard.yml

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..c0891990
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,30 @@
+version: 2
+updates:
+  - package-ecosystem: docker
+    directory: /
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: /examples/xds-sotw-config-server
+    schedule:
+      interval: weekly
+  - package-ecosystem: docker
+    directory: /integration-test
+    schedule:
+      interval: weekly
+  - package-ecosystem: github-actions
+    directory: /
+    schedule:
+      interval: weekly
+  - package-ecosystem: gomod
+    directory: /
+    schedule:
+      interval: weekly
+  - package-ecosystem: gomod
+    directory: /examples/xds-sotw-config-server
+    schedule:
+      interval: weekly
+  - package-ecosystem: pip
+    directory: /
+    schedule:
+      interval: weekly
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 00000000..357928b8
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,48 @@
+name: "CodeQL"
+
+permissions:
+  actions: read
+  contents: read
+  security-events: write
+
+on:
+  push:
+    branches: ["main"]
+  pull_request:
+    branches: ["main"]
+  schedule:
+    - cron: "30 11 * * 6"
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    timeout-minutes: 360
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: ["go"]
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+
+      - name: Install Go
+        uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
+        with:
+          go-version-file: go.mod
+
+      # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL + uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + with: + languages: ${{ matrix.language }} + + - name: Autobuild + uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index ab692ae6..cdaec929 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -1,5 +1,8 @@ name: Build and push :master image +permissions: + contents: read + on: push: branches: @@ -9,21 +12,21 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@f211e3e9ded2d9377c8cadc4489a4e38014bc4c9 # v1.7.0 - name: build and push docker image run: | @@ -38,13 +41,13 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - - uses: actions/setup-python@v2 + - uses: actions/setup-python@e9aba2c848f5ebd159c070c61ea2c4e2b122355e # v2.3.4 with: python-version: "3.9" - - uses: actions/setup-go@v2 + - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2.2.0 with: go-version: "1.21.5" diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index f3b17246..de63211a 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -1,5 +1,8 @@ name: CI Build and Test for PR +permissions: + contents: read + on: pull_request: @@ -8,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: check format run: make check_format @@ -17,7 +20,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: build and test run: make docker_tests @@ -26,13 +29,13 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - - uses: actions/setup-python@v2 + - uses: actions/setup-python@e9aba2c848f5ebd159c070c61ea2c4e2b122355e # v2.3.4 with: python-version: "3.9" - - uses: actions/setup-go@v2 + - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2.2.0 with: go-version: "1.21.5" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 7b231131..dc6ff10a 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,5 +1,8 @@ name: Build and push :release image +permissions: + contents: read + on: push: tags: @@ -9,20 +12,20 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - 
uses: actions/checkout@v2 + - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@f211e3e9ded2d9377c8cadc4489a4e38014bc4c9 # v1.7.0 - name: build and push docker image run: | diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 00000000..ecab3939 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,69 @@ +name: Scorecard supply-chain security + +permissions: + contents: read + +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: "31 17 * * 3" + push: + branches: ["main"] + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard. 
+ - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + with: + sarif_file: results.sarif diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 72a49005..b96b0ffa 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,3 +1,6 @@ +permissions: + contents: read + on: workflow_dispatch: schedule: @@ -5,12 +8,15 @@ on: jobs: prune_stale: + permissions: + issues: write # for actions/stale to close stale issues + pull-requests: write # for actions/stale to close stale PRs name: Prune Stale runs-on: ubuntu-latest steps: - name: Prune Stale - uses: actions/stale@v3.0.14 + uses: actions/stale@87c2b794b9b47a9bec68ae03c01aeb572ffebdb1 # v3.0.14 with: repo-token: ${{ secrets.GITHUB_TOKEN }} # Different amounts of days for issues/PRs are not currently supported but there is a PR diff --git a/Dockerfile b/Dockerfile index 871426d5..9e5c40ab 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5 AS build +FROM golang:1.21.5@sha256:672a2286da3ee7a854c3e0a56e0838918d0dbb1c18652992930293312de898a6 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.18.5 AS final +FROM alpine:3.18.5@sha256:34871e7290500828b39e22294660bee86d966bc0017544e848dd9a255cdf59e0 AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit diff --git a/Dockerfile.integration b/Dockerfile.integration index 13c0f957..25cae665 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang:1.21.5 +FROM golang@sha256:672a2286da3ee7a854c3e0a56e0838918d0dbb1c18652992930293312de898a6 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index a93181d1..98388a72 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,11 +1,11 @@ -FROM golang:1.21.5 AS build +FROM golang:1.21.5@sha256:672a2286da3ee7a854c3e0a56e0838918d0dbb1c18652992930293312de898a6 AS build WORKDIR /xds-server COPY . . RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/xds-server -v main/main.go -FROM alpine:3.16 AS final +FROM alpine:3.16@sha256:e4cdb7d47b06ba0a062ad2a97a7d154967c8f83934594d9f2bd3efa89292996b AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/xds-server /bin/xds-server ENTRYPOINT [ "/bin/xds-server" ] diff --git a/integration-test/Dockerfile.tester b/integration-test/Dockerfile.tester index ff4a5c8b..18979fd9 100644 --- a/integration-test/Dockerfile.tester +++ b/integration-test/Dockerfile.tester @@ -1,4 +1,4 @@ -FROM alpine:latest +FROM alpine@sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48 USER root From f55237190ad48c99eb1116b075af980c1b2544ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jan 2024 00:33:39 -0800 Subject: [PATCH 082/181] Bump actions/setup-python from 2.3.4 to 5.0.0 (#478) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 2.3.4 to 5.0.0. 
- [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/e9aba2c848f5ebd159c070c61ea2c4e2b122355e...0a5c61591373683505ea898e09a3ea4f39ef2b9c) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index cdaec929..6d596526 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -43,7 +43,7 @@ jobs: steps: - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - - uses: actions/setup-python@e9aba2c848f5ebd159c070c61ea2c4e2b122355e # v2.3.4 + - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: python-version: "3.9" diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index de63211a..1289fad6 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -31,7 +31,7 @@ jobs: steps: - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 - - uses: actions/setup-python@e9aba2c848f5ebd159c070c61ea2c4e2b122355e # v2.3.4 + - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: python-version: "3.9" From 35f3e3a0c400242dc2acdc580984d0b1212c31f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Jan 2024 15:32:54 -0800 Subject: [PATCH 083/181] Bump ossf/scorecard-action from 2.1.2 to 2.3.1 (#469) Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.1.2 to 2.3.1. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/e38b1902ae4f44df626f11ba0734b14fb91f8f86...0864cf19026789058feabb7e87baa5f140aac736) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index ecab3939..20321ba2 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -34,7 +34,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2 + uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 with: results_file: results.sarif results_format: sarif From d87236e34501996e2a608de7306f5a038620022f Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Mon, 15 Jan 2024 17:23:14 +0100 Subject: [PATCH 084/181] add OSSF Scorecard badge to README.md (#482) Signed-off-by: Matthieu MOREL --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 4823013f..9dd39a7f 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,8 @@ applications. 
Applications request a rate limit decision based on a domain and a set of descriptors. The service
reads the configuration from disk via [runtime](https://github.com/lyft/goruntime), composes a cache key, and talks to the Redis cache. A decision is then returned to the caller.

+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/envoyproxy/ratelimit/badge)](https://securityscorecards.dev/viewer/?uri=github.com/envoyproxy/ratelimit)
+
 # Docker Image

 For every main commit, an image is pushed to [Dockerhub](https://hub.docker.com/r/envoyproxy/ratelimit/tags?page=1&ordering=last_updated). There is currently no versioning (post v1.4.0) and tags are based on commit sha.

From 763fd8ea782296bce54990e9d6a26063a964da1b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Szcz=C4=99sny?=
Date: Mon, 15 Jan 2024 17:47:39 +0100
Subject: [PATCH 085/181] stats: sanitize metric names (#481)

* stats: sanitize metric names

If the configuration contains a key value with a `:` or `|`, e.g.

```
- key: path
  value: "/foo:*"
  rate_limit:
    unit: minute
    requests_per_unit: 20
```

the reported statsd metrics are malformed.

Signed-off-by: Lukasz Szczesny

* Add tests

Signed-off-by: Lukasz Szczesny

---------

Signed-off-by: Lukasz Szczesny
---
 src/stats/manager_impl.go       |  2 ++
 src/utils/utilities.go          |  6 ++++
 test/mocks/stats/manager.go     |  2 ++
 test/stats/manager_impl_test.go | 56 +++++++++++++++++++++++++++++++++
 4 files changed, 66 insertions(+)
 create mode 100644 test/stats/manager_impl_test.go

diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go
index effad309..efe8aa07 100644
--- a/src/stats/manager_impl.go
+++ b/src/stats/manager_impl.go
@@ -5,6 +5,7 @@ import (
 	logger "github.com/sirupsen/logrus"

 	"github.com/envoyproxy/ratelimit/src/settings"
+	"github.com/envoyproxy/ratelimit/src/utils"
 )

 func NewStatManager(store gostats.Store, settings settings.Settings) *ManagerImpl {
@@ -28,6 +29,7 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats {
 	ret := RateLimitStats{}
 	logger.Debugf("Creating stats for key: '%s'", key)
 	ret.Key = key
+	key = utils.SanitizeStatName(key)
 	ret.TotalHits = this.rlStatsScope.NewCounter(key + ".total_hits")
 	ret.OverLimit = this.rlStatsScope.NewCounter(key + ".over_limit")
 	ret.NearLimit = this.rlStatsScope.NewCounter(key + ".near_limit")
diff --git a/src/utils/utilities.go b/src/utils/utilities.go
index f9ecf856..48f7f7ca 100644
--- a/src/utils/utilities.go
+++ b/src/utils/utilities.go
@@ -61,3 +61,9 @@ func MaskCredentialsInUrl(url string) string {

 	return strings.Join(urls, ",")
 }
+
+// Remove invalid characters from the stat name.
+func SanitizeStatName(s string) string {
+	r := strings.NewReplacer(":", "_", "|", "_")
+	return r.Replace(s)
+}
diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go
index 14850ac6..dd9db246 100644
--- a/test/mocks/stats/manager.go
+++ b/test/mocks/stats/manager.go
@@ -5,6 +5,7 @@ import (
 	logger "github.com/sirupsen/logrus"

 	"github.com/envoyproxy/ratelimit/src/stats"
+	"github.com/envoyproxy/ratelimit/src/utils"
 )

 type MockStatManager struct {
@@ -36,6 +37,7 @@ func (m *MockStatManager) NewStats(key string) stats.RateLimitStats {
 	ret := stats.RateLimitStats{}
 	logger.Debugf("outputing test gostats %s", key)
 	ret.Key = key
+	key = utils.SanitizeStatName(key)
 	ret.TotalHits = m.store.NewCounter(key + ".total_hits")
 	ret.OverLimit = m.store.NewCounter(key + ".over_limit")
 	ret.NearLimit = m.store.NewCounter(key + ".near_limit")
diff --git a/test/stats/manager_impl_test.go b/test/stats/manager_impl_test.go
new file mode 100644
index 00000000..793ce835
--- /dev/null
+++ b/test/stats/manager_impl_test.go
@@ -0,0 +1,56 @@
+package test_stats
+
+import (
+	"fmt"
+	"testing"
+
+	gostats "github.com/lyft/gostats"
+	gostatsMock "github.com/lyft/gostats/mock"
+	"github.com/stretchr/testify/assert"
+
+	"github.com/envoyproxy/ratelimit/src/settings"
+	"github.com/envoyproxy/ratelimit/src/stats"
+)
+
+func TestEscapingInvalidCharactersInMetricName(t *testing.T) {
+	mockSink := gostatsMock.NewSink()
+	statsStore := gostats.NewStore(mockSink, false)
+	statsManager := stats.NewStatManager(statsStore, settings.Settings{})
+
+	tests := []struct {
+		name string
+		key  string
+		want string
+	}{
+		{
+			name: "use unmodified key if it does not contain special characters",
+			key:  "path_/foo/bar",
+			want: "path_/foo/bar",
+		},
+		{
+			name: "escape colon",
+			key:  "path_/foo:*:bar",
+			want: "path_/foo_*_bar",
+		},
+		{
+			name: "escape pipe",
+			key:  "path_/foo|bar|baz",
+			want: "path_/foo_bar_baz",
+		},
+		{
+			name: "escape all special characters",
+			key:  "path_/foo:bar|baz",
+			want: "path_/foo_bar_baz",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			stats := statsManager.NewStats(tt.key)
+			assert.Equal(t, tt.key, stats.Key)
+
+			stats.TotalHits.Inc()
+			statsManager.GetStatsStore().Flush()
+			mockSink.AssertCounterExists(t, fmt.Sprintf("ratelimit.service.rate_limit.%s.total_hits", tt.want))
+		})
+	}
+}

From 9901a9bf860a796df97a864a50a8e4c7ac65a9de Mon Sep 17 00:00:00 2001
From: Alekhya Kondapuram <152326265+akondapuram@users.noreply.github.com>
Date: Wed, 7 Feb 2024 07:08:04 -0800
Subject: [PATCH 086/181] Add detailed_metric support for xds-config (#465)

Signed-off-by: alekhya.kondapuram
---
 .../config/ratelimit/v3/rls_conf.proto        | 16 ++---------
 .../service/ratelimit/v3/rls_conf_ds.proto    | 25 +++++++++++++++++
 examples/xds-sotw-config-server/go.mod        |  5 ++--
 examples/xds-sotw-config-server/go.sum        | 14 ++++++----
 examples/xds-sotw-config-server/resource.go   |  3 +-
 go.mod                                        |  5 ++--
 go.sum                                        | 10 ++++---
 ...-included-in-stats-key-when-unspecified.sh |  2 +-
 src/config/config.go                          | 16 +++++------
 src/config/config_impl.go                     | 28 +++++++++----------
 src/config/config_xds.go                      | 11 ++++----
 11 files changed, 79 insertions(+), 56 deletions(-)
 create mode 100644 api/ratelimit/service/ratelimit/v3/rls_conf_ds.proto

diff --git a/api/ratelimit/config/ratelimit/v3/rls_conf.proto b/api/ratelimit/config/ratelimit/v3/rls_conf.proto
index cdb1836f..a1b90fc4 100644
--- a/api/ratelimit/config/ratelimit/v3/rls_conf.proto
+++ b/api/ratelimit/config/ratelimit/v3/rls_conf.proto
@@ -42,6 +42,9 @@ message
RateLimitDescriptor { // Mark the descriptor as shadow. When the values is true, rate limit service allow requests to the backend. bool shadow_mode = 5; + + // Setting the `detailed_metric: true` for a descriptor will extend the metrics that are produced. + bool detailed_metric = 6; } // Rate-limit policy. @@ -89,16 +92,3 @@ enum RateLimitUnit { // The time unit representing a day. DAY = 4; } - -// [#protodoc-title: Rate Limit Config Discovery Service (RLS Conf DS)] - -// Return list of all rate limit configs that rate limit service should be configured with. -service RateLimitConfigDiscoveryService { - rpc StreamRlsConfigs(stream envoy.service.discovery.v3.DiscoveryRequest) - returns (stream envoy.service.discovery.v3.DiscoveryResponse) { - } - - rpc FetchRlsConfigs(envoy.service.discovery.v3.DiscoveryRequest) - returns (envoy.service.discovery.v3.DiscoveryResponse) { - } -} diff --git a/api/ratelimit/service/ratelimit/v3/rls_conf_ds.proto b/api/ratelimit/service/ratelimit/v3/rls_conf_ds.proto new file mode 100644 index 00000000..3aa6525c --- /dev/null +++ b/api/ratelimit/service/ratelimit/v3/rls_conf_ds.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package ratelimit.service.ratelimit.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +option java_package = "io.envoyproxy.ratelimit.service.config.v3"; +option java_outer_classname = "RlsConfigProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/ratelimit/service/config/v3;configv3"; +option java_generic_services = true; + +// [#protodoc-title: Rate Limit Config Discovery Service (RLS Conf DS)] + +// Return list of all rate limit configs that rate limit service should be configured with. +service RateLimitConfigDiscoveryService { + + rpc StreamRlsConfigs(stream envoy.service.discovery.v3.DiscoveryRequest) + returns (stream envoy.service.discovery.v3.DiscoveryResponse) { + } + + rpc FetchRlsConfigs(envoy.service.discovery.v3.DiscoveryRequest) + returns (envoy.service.discovery.v3.DiscoveryResponse) { + } +} diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index a6162ea0..2b262621 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -3,7 +3,7 @@ module github.com/envoyproxy/ratelimit/examples/xds-sotw-config-server go 1.21.5 require ( - github.com/envoyproxy/go-control-plane v0.11.1 + github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b google.golang.org/grpc v1.59.0 ) @@ -12,11 +12,12 @@ require ( github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect ) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum index b923eb81..67029dff 100644 --- a/examples/xds-sotw-config-server/go.sum +++ b/examples/xds-sotw-config-server/go.sum @@ -9,8 +9,8 @@ github.com/cncf/xds/go 
v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWH github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b h1:M0BhcNaW04UV1haQO8IFSDB64dAeiBSsTMZks/sYDcQ= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b/go.mod h1:lFu6itz1hckLR2A3aJ+ZKf3lu8HpjTsJSsqvVF6GL6g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= @@ -25,11 +25,13 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 h1:pH+U6pJP0BhxqQ4njBUjOg0++WMMvv3eByWzB+oATBY= +github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -74,8 +76,8 @@ google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/examples/xds-sotw-config-server/resource.go b/examples/xds-sotw-config-server/resource.go index 71df6324..5792f156 100644 --- a/examples/xds-sotw-config-server/resource.go +++ b/examples/xds-sotw-config-server/resource.go @@ -93,7 +93,8 @@ func makeRlsConfig() []types.Resource { }, Descriptors: []*rls_config.RateLimitDescriptor{ { - Key: "bar", + Key: "bar", + DetailedMetric: true, RateLimit: &rls_config.RateLimitPolicy{ Unit: rls_config.RateLimitUnit_MINUTE, RequestsPerUnit: 3, diff --git a/go.mod b/go.mod index 5d1c386d..fe94a09f 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/alicebob/miniredis/v2 v2.31.0 github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 github.com/coocood/freecache v1.2.4 - github.com/envoyproxy/go-control-plane v0.11.1 + github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 github.com/google/uuid v1.4.0 @@ -28,7 +28,7 @@ require ( go.opentelemetry.io/otel/trace v1.21.0 golang.org/x/net v0.19.0 google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.32.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -44,6 +44,7 @@ require ( github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect + github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.1 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect diff --git a/go.sum b/go.sum index 3da22d74..9b62a6f4 100644 --- a/go.sum +++ b/go.sum @@ -37,8 +37,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b h1:M0BhcNaW04UV1haQO8IFSDB64dAeiBSsTMZks/sYDcQ= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b/go.mod h1:lFu6itz1hckLR2A3aJ+ZKf3lu8HpjTsJSsqvVF6GL6g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= @@ -101,6 +101,8 @@ github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcM github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 h1:pH+U6pJP0BhxqQ4njBUjOg0++WMMvv3eByWzB+oATBY= +github.com/planetscale/vtprotobuf 
v0.5.1-0.20231212170721-e7d721933795/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -244,8 +246,8 @@ google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh b/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh index a3851cef..6fc8b486 100755 --- a/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh +++ b/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh @@ -24,7 +24,7 @@ if [ $? -eq 0 ]; then fi # Sleep a bit to allow the stats to be propagated -sleep 2 +sleep 5 # Extract the metric for the unspecified value, which shoulb be there due to the "detailed_metric" stats=$(curl -f -s statsd:9102/metrics | grep -e ratelimit_service_rate_limit_over_limit | grep unspec_unspecified_value | cut -d} -f2 | sed 's/ //g') diff --git a/src/config/config.go b/src/config/config.go index ca24d1e2..d1886033 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -17,14 +17,14 @@ func (e RateLimitConfigError) Error() string { // Wrapper for an individual rate limit config entry which includes the defined limit and stats. type RateLimit struct { - FullKey string - Stats stats.RateLimitStats - Limit *pb.RateLimitResponse_RateLimit - Unlimited bool - ShadowMode bool - Name string - Replaces []string - IncludeValueInMetricWhenNotSpecified bool + FullKey string + Stats stats.RateLimitStats + Limit *pb.RateLimitResponse_RateLimit + Unlimited bool + ShadowMode bool + Name string + Replaces []string + DetailedMetric bool } // Interface for interacting with a loaded rate limit config. 
diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 2bdbf9d4..455b0861 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -26,12 +26,12 @@ type YamlRateLimit struct { } type YamlDescriptor struct { - Key string - Value string - RateLimit *YamlRateLimit `yaml:"rate_limit"` - Descriptors []YamlDescriptor - ShadowMode bool `yaml:"shadow_mode"` - IncludeMetricsForUnspecifiedValue bool `yaml:"detailed_metric"` + Key string + Value string + RateLimit *YamlRateLimit `yaml:"rate_limit"` + Descriptors []YamlDescriptor + ShadowMode bool `yaml:"shadow_mode"` + DetailedMetric bool `yaml:"detailed_metric"` } type YamlRoot struct { @@ -77,7 +77,7 @@ var validKeys = map[string]bool{ // @param unlimited supplies whether the rate limit is unlimited // @return the new config entry. func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, - unlimited bool, shadowMode bool, name string, replaces []string, includeValueInMetricWhenNotSpecified bool) *RateLimit { + unlimited bool, shadowMode bool, name string, replaces []string, detailedMetric bool) *RateLimit { return &RateLimit{ FullKey: rlStats.GetKey(), @@ -86,11 +86,11 @@ func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Un RequestsPerUnit: requestsPerUnit, Unit: unit, }, - Unlimited: unlimited, - ShadowMode: shadowMode, - Name: name, - Replaces: replaces, - IncludeValueInMetricWhenNotSpecified: includeValueInMetricWhenNotSpecified, + Unlimited: unlimited, + ShadowMode: shadowMode, + Name: name, + Replaces: replaces, + DetailedMetric: detailedMetric, } } @@ -167,7 +167,7 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p rateLimit = NewRateLimit( descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), statsManager.NewStats(newParentKey), unlimited, descriptorConfig.ShadowMode, - descriptorConfig.RateLimit.Name, replaces, descriptorConfig.IncludeMetricsForUnspecifiedValue, + descriptorConfig.RateLimit.Name, replaces, descriptorConfig.DetailedMetric, ) rateLimitDebugString = fmt.Sprintf( " ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t, shadow_mode=%t}", rateLimit.Limit.RequestsPerUnit, @@ -342,7 +342,7 @@ func (this *rateLimitConfigImpl) GetLimit( logger.Debugf("iterating to next level") descriptorsMap = nextDescriptor.descriptors } else { - if rateLimit != nil && rateLimit.IncludeValueInMetricWhenNotSpecified { + if rateLimit != nil && rateLimit.DetailedMetric { rateLimit = NewRateLimit(rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit, this.statsManager.NewStats(rateLimit.FullKey+"_"+entry.Value), rateLimit.Unlimited, rateLimit.ShadowMode, rateLimit.Name, rateLimit.Replaces, false) } diff --git a/src/config/config_xds.go b/src/config/config_xds.go index 1e772c36..f6c67ce2 100644 --- a/src/config/config_xds.go +++ b/src/config/config_xds.go @@ -16,11 +16,12 @@ func rateLimitDescriptorsPbToYaml(pb []*rls_conf_v3.RateLimitDescriptor) []YamlD descriptors := make([]YamlDescriptor, len(pb)) for i, d := range pb { descriptors[i] = YamlDescriptor{ - Key: d.Key, - Value: d.Value, - RateLimit: rateLimitPolicyPbToYaml(d.RateLimit), - Descriptors: rateLimitDescriptorsPbToYaml(d.Descriptors), - ShadowMode: d.ShadowMode, + Key: d.Key, + Value: d.Value, + RateLimit: rateLimitPolicyPbToYaml(d.RateLimit), + Descriptors: rateLimitDescriptorsPbToYaml(d.Descriptors), + ShadowMode: d.ShadowMode, + DetailedMetric: d.DetailedMetric, } } From 
f3b67307a53c3979783bef4de8bfa655167b0807 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 18 Feb 2024 09:11:15 -0800 Subject: [PATCH 087/181] Bump actions/setup-go from 2.2.0 to 5.0.0 (#484) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 2.2.0 to 5.0.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v2.2.0...0c52d547c9bc32b1aa3301fd7a9cb496313a4491) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 2 +- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 357928b8..bb99e55c 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -29,7 +29,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Install Go - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version-file: go.mod diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 6d596526..4aab2609 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -47,7 +47,7 @@ jobs: with: python-version: "3.9" - - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2.2.0 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: "1.21.5" diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 1289fad6..2ccf0030 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -35,7 +35,7 @@ jobs: with: python-version: "3.9" - - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2.2.0 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: "1.21.5" From 19f2079f607dfd3c72fbb0609a5390f9c19b2e5e Mon Sep 17 00:00:00 2001 From: Alekhya Kondapuram <152326265+akondapuram@users.noreply.github.com> Date: Fri, 23 Feb 2024 14:00:48 -0800 Subject: [PATCH 088/181] Retry the grpc connection when there's an error (#503) Signed-off-by: alekhya.kondapuram --- README.md | 8 ++++++++ go.mod | 1 + go.sum | 2 ++ src/provider/xds_grpc_sotw_provider.go | 23 ++++++++++++++++++----- src/settings/settings.go | 6 ++++++ 5 files changed, 35 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 9dd39a7f..25a698d8 100644 --- a/README.md +++ b/README.md @@ -647,7 +647,15 @@ To enable this behavior set `MERGE_DOMAIN_CONFIG` to `true`. xDS Management Server is a gRPC server which implements the [Aggregated Discovery Service (ADS)](https://github.com/envoyproxy/data-plane-api/blob/97b6dae39046f7da1331a4dc57830d20e842fc26/envoy/service/discovery/v3/ads.proto). The xDS Management server serves [Discovery Response](https://github.com/envoyproxy/data-plane-api/blob/97b6dae39046f7da1331a4dc57830d20e842fc26/envoy/service/discovery/v3/discovery.proto#L69) with [Ratelimit Configuration Resources](api/ratelimit/config/ratelimit/v3/rls_conf.proto) and with Type URL `"type.googleapis.com/ratelimit.config.ratelimit.v3.RateLimitConfig"`. 
+The xDS client in the Rate limit service configures the Rate limit service with the provided configuration.
+In case of connection failures, the xDS client retries the connection to the xDS server with exponential backoff, and the backoff parameters are configurable.
+
+1. `XDS_CLIENT_BACKOFF_JITTER`: set to `"true"` to add jitter to the exponential backoff.
+2. `XDS_CLIENT_BACKOFF_INITIAL_INTERVAL`: The base amount of time the xDS client waits before retrying the connection after failure. Default: "10s"
+3. `XDS_CLIENT_BACKOFF_MAX_INTERVAL`: The max backoff interval is the upper limit on the amount of time the xDS client will wait between retries. After reaching the max backoff interval, the next retries will continue using the max interval. Default: "60s"
+4. `XDS_CLIENT_BACKOFF_RANDOM_FACTOR`: This is a factor by which the initial interval is multiplied to calculate the next backoff interval. Default: "0.5"
+
 For more information on xDS protocol please refer to the [envoy proxy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol).

 You can refer to [the sample xDS configuration management server](examples/xds-sotw-config-server/README.md).

diff --git a/go.mod b/go.mod
index fe94a09f..45ef3a66 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
 	github.com/google/uuid v1.4.0
 	github.com/gorilla/mux v1.8.1
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
+	github.com/jpillora/backoff v1.0.0
 	github.com/kavu/go_reuseport v1.5.0
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/lyft/goruntime v0.3.0
diff --git a/go.sum b/go.sum
index 9b62a6f4..2b8a48b3 100644
--- a/go.sum
+++ b/go.sum
@@ -77,6 +77,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDa
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y=
+github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/kavu/go_reuseport v1.5.0 h1:UNuiY2OblcqAtVDE8Gsg1kZz8zbBWg907sP1ceBV+bk=
 github.com/kavu/go_reuseport v1.5.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU=
 github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
diff --git a/src/provider/xds_grpc_sotw_provider.go b/src/provider/xds_grpc_sotw_provider.go
index fcacc212..9dc9faee 100644
--- a/src/provider/xds_grpc_sotw_provider.go
+++ b/src/provider/xds_grpc_sotw_provider.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"strings"
+	"time"

 	"google.golang.org/grpc/metadata"

@@ -11,6 +12,7 @@ import (
 	"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
 	"github.com/golang/protobuf/ptypes/any"
 	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
+	"github.com/jpillora/backoff"
 	logger "github.com/sirupsen/logrus"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
@@ -67,6 +69,12 @@ func (p *XdsGrpcSotwProvider) Stop() {
 func (p *XdsGrpcSotwProvider) initXdsClient() {
 	logger.Info("Starting xDS client connection for rate limit configurations")
 	conn := p.initializeAndWatch()
+	b := &backoff.Backoff{
+		Min:    p.settings.XdsClientBackoffInitialInterval,
+		Max:    p.settings.XdsClientBackoffMaxInterval,
+		Factor: p.settings.XdsClientBackoffRandomFactor,
+		Jitter: p.settings.XdsClientBackoffJitter,
+	}

 	for retryEvent := range p.connectionRetryChannel {
 		if conn != nil {
@@ -76,10 +84,18 @@ func (p *XdsGrpcSotwProvider) initXdsClient() {
 			logger.Info("Stopping xDS client watch for rate limit configurations")
 			break
 		}
+		d := p.getJitteredExponentialBackOffDuration(b)
+		logger.Debugf("Sleeping for %s using exponential backoff\n", d)
+		time.Sleep(d)
 		conn = p.initializeAndWatch()
 	}
 }

+func (p *XdsGrpcSotwProvider) getJitteredExponentialBackOffDuration(b *backoff.Backoff) time.Duration {
+	logger.Debugf("Retry attempt# %f", b.Attempt())
+	return b.Duration()
+}
+
 func (p *XdsGrpcSotwProvider) initializeAndWatch() *grpc.ClientConn {
 	conn, err := p.getGrpcConnection()
 	if err != nil {
@@ -99,11 +115,8 @@ func (p *XdsGrpcSotwProvider) watchConfigs() {
 		resp, err := p.adsClient.Fetch()
 		if err != nil {
 			logger.Errorf("Failed to receive configuration from xDS Management Server: %s", err.Error())
-			if sotw.IsConnError(err) {
-				p.retryGrpcConn()
-				return
-			}
-			p.adsClient.Nack(err.Error())
+			p.retryGrpcConn()
+			return
 		} else {
 			logger.Tracef("Response received from xDS Management Server: %v", resp)
 			p.sendConfigs(resp.Resources)
diff --git a/src/settings/settings.go b/src/settings/settings.go
index 09704781..f3c3721d 100644
--- a/src/settings/settings.go
+++ b/src/settings/settings.go
@@ -69,6 +69,12 @@ type Settings struct {
 	// GrpcClientTlsSAN is the SAN to validate from the client cert during mTLS auth
 	ConfigGrpcXdsServerTlsSAN string `envconfig:"CONFIG_GRPC_XDS_SERVER_TLS_SAN" default:""`

+	// xDS client backoff configuration
+	XdsClientBackoffInitialInterval time.Duration `envconfig:"XDS_CLIENT_BACKOFF_INITIAL_INTERVAL" default:"10s"`
+	XdsClientBackoffMaxInterval     time.Duration `envconfig:"XDS_CLIENT_BACKOFF_MAX_INTERVAL" default:"60s"`
+	XdsClientBackoffRandomFactor    float64       `envconfig:"XDS_CLIENT_BACKOFF_RANDOM_FACTOR" default:"0.5"`
+	XdsClientBackoffJitter          bool          `envconfig:"XDS_CLIENT_BACKOFF_JITTER" default:"true"`
+
 	// Stats-related settings
 	UseStatsd  bool   `envconfig:"USE_STATSD" default:"true"`
 	StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"`

From 78ed1dc7c6a13a860945c1b11e6a65153c49a5ed Mon Sep 17 00:00:00 2001
From: Guilhem Lettron
Date: Wed, 27 Mar 2024 16:56:02 +0100
Subject: [PATCH 089/181] fix: init gostats to not flood log (#520)

The default gostats store floods messages on stderr except when statsd is
used or GOSTATS_LOGGING_SINK_DISABLED is set to "true". This commit
initializes the store in an unambiguous way.
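
For reference, a minimal standalone sketch of the explicit wiring, using only
the gostats calls that appear in the diff below; `useStatsd`, `host`, and
`port` are placeholder inputs standing in for the runner's settings:

```go
package example

import (
	"time"

	gostats "github.com/lyft/gostats"
)

// newStore picks the sink explicitly so gostats never falls back to
// its stderr logging sink.
func newStore(useStatsd bool, host string, port int) gostats.Store {
	var sink gostats.Sink
	if useStatsd {
		// Ship metrics to statsd over TCP.
		sink = gostats.NewTCPStatsdSink(
			gostats.WithStatsdHost(host),
			gostats.WithStatsdPort(port))
	} else {
		// Discard metrics instead of logging them.
		sink = gostats.NewNullSink()
	}
	store := gostats.NewStore(sink, false)
	// Flush on a fixed cadence, mirroring the runner's 10s ticker.
	go store.Start(time.NewTicker(10 * time.Second))
	return store
}
```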
Signed-off-by: Guilhem Lettron --- src/service_cmd/runner/runner.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index f59ce445..cf7e876e 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -38,8 +38,19 @@ type Runner struct { } func NewRunner(s settings.Settings) Runner { + + var store gostats.Store + // use statsd + if s.UseStatsd { + store = gostats.NewStore(gostats.NewTCPStatsdSink(gostats.WithStatsdHost(s.StatsdHost), gostats.WithStatsdPort(s.StatsdPort)), false) + } else { + store = gostats.NewStore(gostats.NewNullSink(), false) + } + + go store.Start(time.NewTicker(10 * time.Second)) + return Runner{ - statsManager: stats.NewStatManager(gostats.NewDefaultStore(), s), + statsManager: stats.NewStatManager(store, s), settings: s, } } From 3678e41b488269aa080ae9dfdf427839b2f24ded Mon Sep 17 00:00:00 2001 From: Guilhem Lettron Date: Fri, 29 Mar 2024 17:24:56 +0100 Subject: [PATCH 090/181] chore: remove trailing line (#545) fix #520 trailing line Signed-off-by: Guilhem Lettron --- src/service_cmd/runner/runner.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index cf7e876e..6e125be9 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -38,7 +38,6 @@ type Runner struct { } func NewRunner(s settings.Settings) Runner { - var store gostats.Store // use statsd if s.UseStatsd { From 4c3754b8b15a4d4b4633272179a63c59cab95e7b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Mar 2024 12:09:16 -0700 Subject: [PATCH 091/181] Bump github/codeql-action from 2.2.4 to 3.24.9 (#543) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2.2.4 to 3.24.9. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/v2.2.4...1b1aada464948af03b950897e5eb522f92603cc2) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecard.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index bb99e55c..28c14380 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -35,14 +35,14 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + uses: github/codeql-action/init@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + uses: github/codeql-action/autobuild@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + uses: github/codeql-action/analyze@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 20321ba2..b5a2354f 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 with: sarif_file: results.sarif From 4b1dcb24d245868a5dc47ec3a431159c86a4d2bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Mar 2024 12:09:42 -0700 Subject: [PATCH 092/181] Bump actions/checkout from 2.7.0 to 4.1.2 (#541) Bumps [actions/checkout](https://github.com/actions/checkout) from 2.7.0 to 4.1.2. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2.7.0...9bb56186c3b09b4f86b1c65136769dd318469633) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 2 +- .github/workflows/main.yaml | 6 +++--- .github/workflows/pullrequest.yaml | 6 +++--- .github/workflows/release.yaml | 4 ++-- .github/workflows/scorecard.yml | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 28c14380..a3f9ccd3 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: Install Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 4aab2609..42374e23 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -12,14 +12,14 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: Set up QEMU uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 2ccf0030..b7f58742 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: check format run: make check_format @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: build and test run: make docker_tests @@ -29,7 +29,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 with: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index dc6ff10a..95e1801e 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,13 +12,13 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - name: Set up QEMU uses: 
docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index b5a2354f..5fc42540 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -29,7 +29,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 with: persist-credentials: false From 3654bfd73dc728debfc280b2097664f595036197 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Mar 2024 12:15:58 -0700 Subject: [PATCH 093/181] Bump docker/setup-buildx-action from 1.7.0 to 3.2.0 (#539) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 1.7.0 to 3.2.0. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/f211e3e9ded2d9377c8cadc4489a4e38014bc4c9...2b51285047da1547ffb1b2203d8be4c0af6b1f20) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/main.yaml | 2 +- .github/workflows/release.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 42374e23..47d95b04 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -26,7 +26,7 @@ jobs: - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@f211e3e9ded2d9377c8cadc4489a4e38014bc4c9 # v1.7.0 + uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v3.2.0 - name: build and push docker image run: | diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 95e1801e..700b442c 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -25,7 +25,7 @@ jobs: - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@f211e3e9ded2d9377c8cadc4489a4e38014bc4c9 # v1.7.0 + uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v3.2.0 - name: build and push docker image run: | From 247089f30127cb1149507f894a2c219b06d98527 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Br=C3=BCderl?= Date: Thu, 4 Apr 2024 17:38:43 +0200 Subject: [PATCH 094/181] config: fix detailed metric keys missing in leading keys (#528) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With detailed metrics, only the last descriptor uses the provided descriptor value. When traversing the list of descriptors, the code "loses" the previous keys. This leads to metrics like "test-domain.first-key_.second-key_second-value", where the last descriptor properly uses the detailed metric descriptor value, but all other descriptors (the first one here) are missing their values. This patch introduces a new string builder that assembles the detailed metric key as the input descriptor is traversed. A unit test is attached to show the behavior: it fails without the new code, and with the patched code it confirms that all descriptor keys are preserved.
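The mechanics of the fix are easy to reproduce in isolation: accumulate a "key_value" segment for every descriptor entry while traversing, instead of attaching the value only at the leaf. A minimal sketch, where `entries` is a hypothetical stand-in for the request's `pb_struct.RateLimitDescriptor_Entry` pairs:

```go
package main

import (
	"fmt"
	"strings"
)

// buildDetailedMetricKey walks the descriptor entries once, appending
// ".key_value" for each entry, so no intermediate key loses its value.
func buildDetailedMetricKey(domain string, entries [][2]string) string {
	var b strings.Builder
	b.WriteString(domain)
	for _, e := range entries {
		b.WriteString(".")
		b.WriteString(e[0] + "_" + e[1])
	}
	return b.String()
}

func main() {
	// Before the fix the key came out as
	// "test-domain.first-key_.second-key_second-value"; with the
	// builder every descriptor key keeps its value:
	fmt.Println(buildDetailedMetricKey("test-domain", [][2]string{
		{"first-key", "first-value"},
		{"second-key", "second-value"},
	}))
	// Output: test-domain.first-key_first-value.second-key_second-value
}
```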
Signed-off-by: Johannes Brüderl --- src/config/config_impl.go | 16 +- test/config/config_test.go | 316 +++++++++++++++++++++++++++++++++++++ 2 files changed, 331 insertions(+), 1 deletion(-) diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 455b0861..0c4152a6 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -306,10 +306,19 @@ func (this *rateLimitConfigImpl) GetLimit( descriptorsMap := value.descriptors prevDescriptor := &value.rateLimitDescriptor + + // Build detailed metric as we traverse the list of descriptors + var detailedMetricFullKey strings.Builder + detailedMetricFullKey.WriteString(domain) + for i, entry := range descriptor.Entries { // First see if key_value is in the map. If that isn't in the map we look for just key // to check for a default value. finalKey := entry.Key + "_" + entry.Value + + detailedMetricFullKey.WriteString(".") + detailedMetricFullKey.WriteString(finalKey) + logger.Debugf("looking up key: %s", finalKey) nextDescriptor := descriptorsMap[finalKey] @@ -343,7 +352,7 @@ func (this *rateLimitConfigImpl) GetLimit( descriptorsMap = nextDescriptor.descriptors } else { if rateLimit != nil && rateLimit.DetailedMetric { - rateLimit = NewRateLimit(rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit, this.statsManager.NewStats(rateLimit.FullKey+"_"+entry.Value), rateLimit.Unlimited, rateLimit.ShadowMode, rateLimit.Name, rateLimit.Replaces, false) + rateLimit = NewRateLimit(rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit, this.statsManager.NewStats(rateLimit.FullKey), rateLimit.Unlimited, rateLimit.ShadowMode, rateLimit.Name, rateLimit.Replaces, rateLimit.DetailedMetric) } break @@ -351,6 +360,11 @@ func (this *rateLimitConfigImpl) GetLimit( prevDescriptor = nextDescriptor } + // Replace metric with detailed metric, if leaf descriptor is detailed. + if rateLimit != nil && rateLimit.DetailedMetric { + rateLimit.Stats = this.statsManager.NewStats(detailedMetricFullKey.String()) + } + return rateLimit } diff --git a/test/config/config_test.go b/test/config/config_test.go index 3ed3984b..bceaa2e6 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -12,6 +12,7 @@ import ( pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3" stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/envoyproxy/ratelimit/src/config" mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" @@ -631,3 +632,318 @@ func TestWildcardConfig(t *testing.T) { }) assert.Nil(midWildcard) } + +func TestDetailedMetric(t *testing.T) { + assert := require.New(t) + stats := stats.NewStore(stats.NewNullSink(), false) + + // Descriptor config with a realistic nested setup, that is re-used across + // multiple test cases. 
+ realisticExampleConfig := &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key_1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 500, + Unit: "minute", + }, + DetailedMetric: true, + }, + { + Key: "key_1", + Value: "some-value-for-key-1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 500, + Unit: "minute", + }, + }, + { + Key: "key_2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5000, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key_3", + DetailedMetric: true, + }, + { + Key: "key_3", + Value: "requested-key3-value", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + DetailedMetric: true, + }, + }, + DetailedMetric: true, + }, + { + Key: "key_2", + Value: "specific-id", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 50000, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key_3", + DetailedMetric: true, + }, + { + Key: "key_3", + Value: "requested-key3-value", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + DetailedMetric: true, + }, + }, + DetailedMetric: true, + }, + }, + } + + tests := []struct { + name string + config []config.RateLimitConfigToLoad + request *pb_struct.RateLimitDescriptor + expectedStatsKey string + }{ + { + name: "nested with no values but request only top-level key", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key-1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key-2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + }, + }, + DetailedMetric: true, + }, + }, + }, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key-1", + Value: "value-1", + }, + }, + }, + expectedStatsKey: "nested.key-1_value-1", + }, + { + name: "nested with no values but request only top-level key with no detailed metric", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key-1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key-2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + }, + }, + DetailedMetric: false, + }, + }, + }, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key-1", + Value: "value-1", + }, + }, + }, + expectedStatsKey: "nested.key-1", + }, + { + name: "nested with no values and request fully nested descriptors", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key-1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key-2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + DetailedMetric: true, + }, + }, + DetailedMetric: true, + }, + }, + }, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key-1", + Value: "value-1", + }, + { + Key: "key-2", + Value: "value-2", + }, + }, + }, + expectedStatsKey: 
"nested.key-1_value-1.key-2_value-2", + }, + { + name: "nested with no values and request fully nested descriptors with no detailed metric", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key-1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key-2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + DetailedMetric: false, + }, + }, + DetailedMetric: false, + }, + }, + }, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key-1", + Value: "value-1", + }, + { + Key: "key-2", + Value: "value-2", + }, + }, + }, + expectedStatsKey: "nested.key-1.key-2", + }, + { + name: "test nested descriptors with simple request", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: realisticExampleConfig, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key_1", + Value: "value-for-key-1", + }, + }, + }, + expectedStatsKey: "nested.key_1_value-for-key-1", + }, + { + name: "test nested only second descriptor request not nested", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: realisticExampleConfig, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key_2", + Value: "key-2-value", + }, + }, + }, + expectedStatsKey: "nested.key_2_key-2-value", + }, + { + name: "test nested descriptors with nested request", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: realisticExampleConfig, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key_2", + Value: "key-2-value", + }, + { + Key: "key_3", + Value: "requested-key3-value", + }, + }, + }, + expectedStatsKey: "nested.key_2_key-2-value.key_3_requested-key3-value", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rlConfig := config.NewRateLimitConfigImpl(tt.config, mockstats.NewMockStatManager(stats), true) + rlConfig.Dump() + + rl := rlConfig.GetLimit( + context.TODO(), "nested", + tt.request, + ) + assert.NotNil(rl) + assert.Equal(tt.expectedStatsKey, rl.Stats.Key) + }) + } +} From 7f35f22339cae86390a40e5ccf3b65f26324c007 Mon Sep 17 00:00:00 2001 From: Zak Henry Date: Fri, 5 Apr 2024 14:57:42 +1300 Subject: [PATCH 095/181] fix(Stats): restore stdout stats option, allow flush interval to be configured with env vars (#549) Signed-off-by: Zak Henry --- README.md | 8 ++++++++ src/service_cmd/runner/runner.go | 15 +++++++++++---- src/settings/settings.go | 10 ++++++---- 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 25a698d8..3fb16b94 100644 --- a/README.md +++ b/README.md @@ -788,6 +788,14 @@ The rate limit service generates various statistics for each configured rate lim users both for visibility and for setting alarms. Ratelimit uses [gostats](https://github.com/lyft/gostats) as its statistics library. Please refer to [gostats' documentation](https://godoc.org/github.com/lyft/gostats) for more information on the library. +Statistics default to using [StatsD](https://github.com/statsd/statsd) and configured via the env vars from [gostats](https://github.com/lyft/gostats). 
+ +To output statistics to stdout instead, set env var `USE_STATSD` to `false` + +Configure statistics output frequency with `STATS_FLUSH_INTERVAL`, where the type is `time.Duration`, e.g. `10s` is the default value. + +To disable statistics entirely, set env var `DISABLE_STATS` to `true` + Rate Limit Statistic Path: ``` diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 6e125be9..f196ea4f 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -39,14 +39,21 @@ type Runner struct { func NewRunner(s settings.Settings) Runner { var store gostats.Store - // use statsd - if s.UseStatsd { + + if s.DisableStats { + logger.Info("Stats disabled") + store = gostats.NewStore(gostats.NewNullSink(), false) + } else if s.UseStatsd { + logger.Info("Stats initialized for statsd") store = gostats.NewStore(gostats.NewTCPStatsdSink(gostats.WithStatsdHost(s.StatsdHost), gostats.WithStatsdPort(s.StatsdPort)), false) } else { - store = gostats.NewStore(gostats.NewNullSink(), false) + logger.Info("Stats initialized for stdout") + store = gostats.NewStore(gostats.NewLoggingSink(), false) } - go store.Start(time.NewTicker(10 * time.Second)) + logger.Infof("Stats flush interval: %s", s.StatsFlushInterval) + + go store.Start(time.NewTicker(s.StatsFlushInterval)) return Runner{ statsManager: stats.NewStatManager(store, s), diff --git a/src/settings/settings.go b/src/settings/settings.go index f3c3721d..5a6c40f1 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -76,10 +76,12 @@ type Settings struct { XdsClientBackoffJitter bool `envconfig:"XDS_CLIENT_BACKOFF_JITTER" default:"true"` // Stats-related settings - UseStatsd bool `envconfig:"USE_STATSD" default:"true"` - StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` - StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` - ExtraTags map[string]string `envconfig:"EXTRA_TAGS" default:""` + UseStatsd bool `envconfig:"USE_STATSD" default:"true"` + StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` + StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` + ExtraTags map[string]string `envconfig:"EXTRA_TAGS" default:""` + StatsFlushInterval time.Duration `envconfig:"STATS_FLUSH_INTERVAL" default:"10s"` + DisableStats bool `envconfig:"DISABLE_STATS" default:"false"` // Settings for rate limit configuration RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` From 7adf8ccfe712b216f814bc376f8e2c17d24ec7ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 19:58:13 -0600 Subject: [PATCH 096/181] Bump github.com/stretchr/testify from 1.8.4 to 1.9.0 (#548) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.4 to 1.9.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.8.4...v1.9.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 13 ++++--------- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 45ef3a66..365390d1 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/lyft/gostats v0.4.12 github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.9.3 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 @@ -47,7 +47,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.5.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect diff --git a/go.sum b/go.sum index 2b8a48b3..780e4898 100644 --- a/go.sum +++ b/go.sum @@ -116,19 +116,14 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= -github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= From 95e2a6e306eff5380621ae80cbd3d382e990d1f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 19:58:33 -0600 Subject: 
[PATCH 097/181] Bump actions/setup-python from 5.0.0 to 5.1.0 (#546) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.0.0 to 5.1.0. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/0a5c61591373683505ea898e09a3ea4f39ef2b9c...82c7e631bb3cdc910f68e0081d67478d79c6982d) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 47d95b04..75d63c91 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -43,7 +43,7 @@ jobs: steps: - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: "3.9" diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index b7f58742..4b24a25f 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -31,7 +31,7 @@ jobs: steps: - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: "3.9" From 214a6d56bb13dbad3d8671626f7efe6a5b693293 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 19:58:51 -0600 Subject: [PATCH 098/181] Bump go.opentelemetry.io/otel/sdk from 1.21.0 to 1.24.0 (#547) Bumps [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) from 1.21.0 to 1.24.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...v1.24.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 365390d1..073ecfb2 100644 --- a/go.mod +++ b/go.mod @@ -21,12 +21,12 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 - go.opentelemetry.io/otel v1.21.0 + go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 - go.opentelemetry.io/otel/sdk v1.21.0 - go.opentelemetry.io/otel/trace v1.21.0 + go.opentelemetry.io/otel/sdk v1.24.0 + go.opentelemetry.io/otel/trace v1.24.0 golang.org/x/net v0.19.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.32.0 @@ -42,16 +42,16 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 // indirect diff --git a/go.sum b/go.sum index 780e4898..aef6ed9c 100644 --- a/go.sum +++ b/go.sum @@ -48,8 +48,8 @@ github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyT github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -132,20 +132,20 @@ github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= 
-go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -198,8 +198,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From 9a3bb6c66460f0c6a87fa19bec77069937e9fbf9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 19:59:11 -0600 Subject: [PATCH 099/181] Bump google.golang.org/grpc in /examples/xds-sotw-config-server (#537) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.59.0 to 1.62.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.59.0...v1.62.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/go.mod | 18 +++--- examples/xds-sotw-config-server/go.sum | 81 +++++++------------------- 2 files changed, 29 insertions(+), 70 deletions(-) diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index 2b262621..6e4f5a40 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -4,20 +4,20 @@ go 1.21.5 require ( github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b - google.golang.org/grpc v1.59.0 + google.golang.org/grpc v1.62.1 ) require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/protobuf v1.32.0 // indirect ) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum index 67029dff..ce1049b7 100644 --- a/examples/xds-sotw-config-server/go.sum +++ b/examples/xds-sotw-config-server/go.sum @@ -1,84 +1,43 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b h1:M0BhcNaW04UV1haQO8IFSDB64dAeiBSsTMZks/sYDcQ= github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b/go.mod h1:lFu6itz1hckLR2A3aJ+ZKf3lu8HpjTsJSsqvVF6GL6g= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 h1:pH+U6pJP0BhxqQ4njBUjOg0++WMMvv3eByWzB+oATBY= github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api 
v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= -google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 103cdde204f8ba266044d4c3c15b8797f67ba920 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 20:00:17 -0600 Subject: [PATCH 100/181] Bump golang from 1.21.5 to 1.22.1 (#536) Bumps golang from 1.21.5 to 1.22.1. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.integration | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 9e5c40ab..b2e23f59 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5@sha256:672a2286da3ee7a854c3e0a56e0838918d0dbb1c18652992930293312de898a6 AS build +FROM golang:1.22.1@sha256:34ce21a9696a017249614876638ea37ceca13cdd88f582caad06f87a8aa45bf3 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index 25cae665..d0b89df4 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang@sha256:672a2286da3ee7a854c3e0a56e0838918d0dbb1c18652992930293312de898a6 +FROM golang@sha256:34ce21a9696a017249614876638ea37ceca13cdd88f582caad06f87a8aa45bf3 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* From 6e2c37fecdfa539e5d5a650887db8272b3695390 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 20:00:49 -0600 Subject: [PATCH 101/181] Bump golang from 1.21.5 to 1.22.1 in /examples/xds-sotw-config-server (#534) Bumps golang from 1.21.5 to 1.22.1. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index 98388a72..8b21a929 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5@sha256:672a2286da3ee7a854c3e0a56e0838918d0dbb1c18652992930293312de898a6 AS build +FROM golang:1.22.1@sha256:34ce21a9696a017249614876638ea37ceca13cdd88f582caad06f87a8aa45bf3 AS build WORKDIR /xds-server COPY . . From ecd76b25da6f9e7cfe7980862c17f93e5f1e0776 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Apr 2024 20:04:40 -0600 Subject: [PATCH 102/181] Bump google.golang.org/grpc from 1.59.0 to 1.63.0 (#551) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.59.0 to 1.63.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.59.0...v1.63.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 18 +++++++++--------- go.sum | 54 +++++++++++++++++++++++++----------------------------- 2 files changed, 34 insertions(+), 38 deletions(-) diff --git a/go.mod b/go.mod index 073ecfb2..b5ac9a2b 100644 --- a/go.mod +++ b/go.mod @@ -8,8 +8,8 @@ require ( github.com/coocood/freecache v1.2.4 github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b github.com/golang/mock v1.6.0 - github.com/golang/protobuf v1.5.3 - github.com/google/uuid v1.4.0 + github.com/golang/protobuf v1.5.4 + github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/jpillora/backoff v1.0.0 @@ -27,9 +27,9 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 go.opentelemetry.io/otel/sdk v1.24.0 go.opentelemetry.io/otel/trace v1.24.0 - golang.org/x/net v0.19.0 - google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.32.0 + golang.org/x/net v0.21.0 + google.golang.org/grpc v1.63.0 + google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -40,7 +40,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -54,8 +54,8 @@ require ( golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect + google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index aef6ed9c..1b595cfd 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -40,8 +40,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b h1:M0BhcNaW04UV1haQO8IFSDB64dAeiBSsTMZks/sYDcQ= 
github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b/go.mod h1:lFu6itz1hckLR2A3aJ+ZKf3lu8HpjTsJSsqvVF6GL6g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -62,15 +62,13 @@ github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+Licev github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= @@ -174,11 +172,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.13.0 
h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -223,28 +221,26 @@ golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSm golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 h1:W12Pwm4urIbRdGhMEg2NM9O3TWKjNcxQhs46V0ypf/k= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From 4537d291115eae2f0d4e446da9bf9cdd44d1333e Mon Sep 17 00:00:00 2001 From: alexhwcheng <121998721+alexhwcheng@users.noreply.github.com> Date: Thu, 4 Apr 2024 19:04:56 -0700 Subject: [PATCH 103/181] Update alpine to 3.18.6 to address openssl vulnerability (#531) Signed-off-by: alexhwcheng <121998721+alexhwcheng@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b2e23f59..6c06460b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.18.5@sha256:34871e7290500828b39e22294660bee86d966bc0017544e848dd9a255cdf59e0 AS final +FROM alpine:3.18.6@sha256:11e21d7b981a59554b3f822c49f6e9f57b6068bb74f49c4cd5cc4c663c7e5160 AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit From 82a4fb567add63a0f604d7079ab7ed6baf67e6af Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sun, 7 Apr 2024 22:28:45 +0200 Subject: [PATCH 104/181] use `google.golang.org/protobuf` instead of `github.com/golang/protobuf` (#553) Signed-off-by: Matthieu MOREL --- go.mod | 2 +- src/provider/xds_grpc_sotw_provider.go | 3 +-- src/utils/utilities.go | 6 +++--- test/integration/integration_test.go | 6 +++--- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index b5ac9a2b..922b22ac 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/coocood/freecache v1.2.4 github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b github.com/golang/mock v1.6.0 - github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 @@ -44,6 +43,7 @@ require ( github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git 
a/src/provider/xds_grpc_sotw_provider.go b/src/provider/xds_grpc_sotw_provider.go index 9dc9faee..48c9cf72 100644 --- a/src/provider/xds_grpc_sotw_provider.go +++ b/src/provider/xds_grpc_sotw_provider.go @@ -10,7 +10,6 @@ import ( corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/envoyproxy/go-control-plane/pkg/resource/v3" - "github.com/golang/protobuf/ptypes/any" grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" "github.com/jpillora/backoff" logger "github.com/sirupsen/logrus" @@ -149,7 +148,7 @@ func (p *XdsGrpcSotwProvider) getGrpcTransportCredentials() grpc.DialOption { return grpc.WithTransportCredentials(credentials.NewTLS(configGrpcXdsTlsConfig)) } -func (p *XdsGrpcSotwProvider) sendConfigs(resources []*any.Any) { +func (p *XdsGrpcSotwProvider) sendConfigs(resources []*anypb.Any) { defer func() { if e := recover(); e != nil { p.configUpdateEventChan <- &ConfigUpdateEventImpl{err: e} diff --git a/src/utils/utilities.go b/src/utils/utilities.go index 48f7f7ca..abe787c0 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -4,7 +4,7 @@ import ( "strings" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/golang/protobuf/ptypes/duration" + "google.golang.org/protobuf/types/known/durationpb" ) // Interface for a time source. @@ -31,10 +31,10 @@ func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { panic("should not get here") } -func CalculateReset(unit *pb.RateLimitResponse_RateLimit_Unit, timeSource TimeSource) *duration.Duration { +func CalculateReset(unit *pb.RateLimitResponse_RateLimit_Unit, timeSource TimeSource) *durationpb.Duration { sec := UnitToDivider(*unit) now := timeSource.UnixNow() - return &duration.Duration{Seconds: sec - now%sec} + return &durationpb.Duration{Seconds: sec - now%sec} } func Max(a uint32, b uint32) uint32 { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 7239e7c3..fda35cd9 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -14,12 +14,12 @@ import ( "time" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/golang/protobuf/ptypes/duration" "github.com/kelseyhightower/envconfig" "github.com/stretchr/testify/assert" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/protobuf/types/known/durationpb" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/src/service_cmd/runner" @@ -59,14 +59,14 @@ func defaultSettings() settings.Settings { return s } -func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32, durRemaining *duration.Duration) *pb.RateLimitResponse_DescriptorStatus { +func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32, durRemaining *durationpb.Duration) *pb.RateLimitResponse_DescriptorStatus { limit := &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit} return &pb.RateLimitResponse_DescriptorStatus{ Code: status, CurrentLimit: limit, LimitRemaining: limitRemaining, - DurationUntilReset: &duration.Duration{Seconds: durRemaining.GetSeconds()}, + DurationUntilReset: &durationpb.Duration{Seconds: durRemaining.GetSeconds()}, } } From b68fb9034a2ecff09cd85ec7c914b8bf37cf01d7 Mon Sep 17 00:00:00 2001 From: healthwaite 
 <148101100+healthwaite@users.noreply.github.com>
Date: Sat, 13 Apr 2024 05:40:30 +0100
Subject: [PATCH 105/181] Add support for starting the server on a unix domain
 socket (#542)

* Add support for unix domain sockets

Signed-off-by: Alex Reid

* Add documentation

Signed-off-by: Alex Reid

---------

Signed-off-by: Alex Reid
---
 README.md                 |  8 ++++++
 src/server/server_impl.go | 57 ++++++++++++++++++++++++++++-----------
 src/settings/settings.go  |  3 +++
 3 files changed, 52 insertions(+), 16 deletions(-)

diff --git a/README.md b/README.md
index 3fb16b94..793e7607 100644
--- a/README.md
+++ b/README.md
@@ -35,6 +35,7 @@
 - [GRPC Keepalive](#grpc-keepalive)
 - [Health-check](#health-check)
 - [Health-check configurations](#health-check-configurations)
+- [GRPC server](#grpc-server)
 - [Request Fields](#request-fields)
 - [GRPC Client](#grpc-client)
 - [Commandline flags](#commandline-flags)
@@ -748,6 +749,13 @@ HEALTHY_WITH_AT_LEAST_ONE_CONFIG_LOADED default:"false"`
 If `HEALTHY_WITH_AT_LEAST_ONE_CONFIG_LOADED` is enabled then health check will start as unhealthy
 and becomes healthy if it detects at least one domain is loaded with the config. If it detects no
 config again then it will change to unhealthy.
 
+## GRPC server
+
+By default the ratelimit gRPC server binds to `0.0.0.0:8081`. To change this set
+`GRPC_HOST` and/or `GRPC_PORT`. If you want to run the server on a unix domain
+socket then set `GRPC_UDS`, e.g. `GRPC_UDS=//ratelimit.sock` and leave
+`GRPC_HOST` and `GRPC_PORT` unmodified.
+
 # Request Fields
 
 For information on the fields of a Ratelimit gRPC request please read the information
diff --git a/src/server/server_impl.go b/src/server/server_impl.go
index 85c636b7..0b42b40f 100644
--- a/src/server/server_impl.go
+++ b/src/server/server_impl.go
@@ -50,20 +50,28 @@ type serverDebugListener struct {
 	listener net.Listener
 }
 
+type grpcListenType int
+
+const (
+	tcp              grpcListenType = 0
+	unixDomainSocket grpcListenType = 1
+)
+
 type server struct {
-	httpAddress   string
-	grpcAddress   string
-	debugAddress  string
-	router        *mux.Router
-	grpcServer    *grpc.Server
-	store         gostats.Store
-	scope         gostats.Scope
-	provider      provider.RateLimitConfigProvider
-	runtime       loader.IFace
-	debugListener serverDebugListener
-	httpServer    *http.Server
-	listenerMu    sync.Mutex
-	health        *HealthChecker
+	httpAddress    string
+	grpcAddress    string
+	grpcListenType grpcListenType
+	debugAddress   string
+	router         *mux.Router
+	grpcServer     *grpc.Server
+	store          gostats.Store
+	scope          gostats.Scope
+	provider       provider.RateLimitConfigProvider
+	runtime        loader.IFace
+	debugListener  serverDebugListener
+	httpServer     *http.Server
+	listenerMu     sync.Mutex
+	health         *HealthChecker
 }
 
 func (server *server) AddDebugHttpEndpoint(path string, help string, handler http.HandlerFunc) {
@@ -197,9 +205,20 @@ func (server *server) Start() {
 
 func (server *server) startGrpc() {
 	logger.Warnf("Listening for gRPC on '%s'", server.grpcAddress)
-	lis, err := reuseport.Listen("tcp", server.grpcAddress)
+	var lis net.Listener
+	var err error
+
+	switch server.grpcListenType {
+	case tcp:
+		lis, err = reuseport.Listen("tcp", server.grpcAddress)
+	case unixDomainSocket:
+		lis, err = net.Listen("unix", server.grpcAddress)
+	default:
+		logger.Fatalf("Invalid gRPC listen type %v", server.grpcListenType)
+	}
+
 	if err != nil {
-		logger.Fatalf("Failed to listen for gRPC: %v", err)
+		logger.Fatalf("Failed to listen for gRPC on '%s': %v", server.grpcAddress, err)
 	}
 	server.grpcServer.Serve(lis)
 }
@@ -247,7 +266,13 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc
 
 	// setup listen addresses
 	ret.httpAddress = net.JoinHostPort(s.Host, strconv.Itoa(s.Port))
-	ret.grpcAddress = net.JoinHostPort(s.GrpcHost, strconv.Itoa(s.GrpcPort))
+	if s.GrpcUds != "" {
+		ret.grpcAddress = s.GrpcUds
+		ret.grpcListenType = unixDomainSocket
+	} else {
+		ret.grpcAddress = net.JoinHostPort(s.GrpcHost, strconv.Itoa(s.GrpcPort))
+		ret.grpcListenType = tcp
+	}
 	ret.debugAddress = net.JoinHostPort(s.DebugHost, strconv.Itoa(s.DebugPort))
 
 	// setup stats
diff --git a/src/settings/settings.go b/src/settings/settings.go
index 5a6c40f1..695c75aa 100644
--- a/src/settings/settings.go
+++ b/src/settings/settings.go
@@ -21,6 +21,9 @@ type Settings struct {
 	DebugPort int    `envconfig:"DEBUG_PORT" default:"6070"`
 
 	// GRPC server settings
+	// If GrpcUds is set we'll listen on the specified unix domain socket address
+	// rather than GrpcHost:GrpcPort. e.g. GrpcUds=/tmp/ratelimit.sock
+	GrpcUds  string `envconfig:"GRPC_UDS" default:""`
 	GrpcHost string `envconfig:"GRPC_HOST" default:"0.0.0.0"`
 	GrpcPort int    `envconfig:"GRPC_PORT" default:"8081"`
 	// GrpcServerTlsConfig configures grpc for the server

From fea2f30bbd7798aeafecc2a9afc0ac2963664b4e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 12 Apr 2024 21:40:54 -0700
Subject: [PATCH 106/181] Bump github/codeql-action from 3.24.9 to 3.24.10
 (#554)

Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.24.9 to 3.24.10.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/1b1aada464948af03b950897e5eb522f92603cc2...4355270be187e1b672a7a1c7c7bae5afdc1ab94a)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/codeql.yml    | 6 +++---
 .github/workflows/scorecard.yml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index a3f9ccd3..ab01275b 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -35,14 +35,14 @@ jobs:
 
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9
+        uses: github/codeql-action/init@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10
         with:
           languages: ${{ matrix.language }}
 
       - name: Autobuild
-        uses: github/codeql-action/autobuild@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9
+        uses: github/codeql-action/autobuild@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10
 
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9
+        uses: github/codeql-action/analyze@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10
         with:
           category: "/language:${{matrix.language}}"
 
diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml
index 5fc42540..5707c582 100644
--- a/.github/workflows/scorecard.yml
+++ b/.github/workflows/scorecard.yml
@@ -64,6 +64,6 @@ jobs:
 
       # Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.24.9 + uses: github/codeql-action/upload-sarif@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10 with: sarif_file: results.sarif From d98c580c2e2f3fdd63987df6ac65a9a1151dbfe2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Apr 2024 21:41:19 -0700 Subject: [PATCH 107/181] Bump golang from 1.22.1 to 1.22.2 (#560) Bumps golang from 1.22.1 to 1.22.2. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.integration | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6c06460b..8f095670 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.1@sha256:34ce21a9696a017249614876638ea37ceca13cdd88f582caad06f87a8aa45bf3 AS build +FROM golang:1.22.2@sha256:c4fb952e712efd8f787bcd8e53fd66d1d83b7dc26adabc218e9eac1dbf776bdf AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index d0b89df4..cdce0d60 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang@sha256:34ce21a9696a017249614876638ea37ceca13cdd88f582caad06f87a8aa45bf3 +FROM golang@sha256:c4fb952e712efd8f787bcd8e53fd66d1d83b7dc26adabc218e9eac1dbf776bdf RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* From 8b6a44b0dcbd439e672163ed1c286dbea28aaffa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Apr 2024 21:41:56 -0700 Subject: [PATCH 108/181] Bump google.golang.org/grpc in /examples/xds-sotw-config-server (#562) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.62.1 to 1.63.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.62.1...v1.63.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/go.mod | 16 +++++------ examples/xds-sotw-config-server/go.sum | 37 +++++++++++--------------- 2 files changed, 24 insertions(+), 29 deletions(-) diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index 6e4f5a40..58979aef 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -4,20 +4,20 @@ go 1.21.5 require ( github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b - google.golang.org/grpc v1.62.1 + google.golang.org/grpc v1.63.0 ) require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/protobuf v1.32.0 // indirect + google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/protobuf v1.33.0 // indirect ) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum index ce1049b7..6c7656be 100644 --- a/examples/xds-sotw-config-server/go.sum +++ b/examples/xds-sotw-config-server/go.sum @@ -8,10 +8,8 @@ github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b h1: github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b/go.mod h1:lFu6itz1hckLR2A3aJ+ZKf3lu8HpjTsJSsqvVF6GL6g= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 h1:pH+U6pJP0BhxqQ4njBUjOg0++WMMvv3eByWzB+oATBY= @@ -20,24 +18,21 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From a618388acdc72eebb3708f852eff4b939ea22a3a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 12:34:10 -0700 Subject: [PATCH 109/181] Bump github.com/lyft/gostats from 0.4.12 to 0.4.13 (#558) Bumps [github.com/lyft/gostats](https://github.com/lyft/gostats) from 0.4.12 to 0.4.13. - [Release notes](https://github.com/lyft/gostats/releases) - [Commits](https://github.com/lyft/gostats/compare/v0.4.12...v0.4.13) --- updated-dependencies: - dependency-name: github.com/lyft/gostats dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 922b22ac..09d8dc10 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/kavu/go_reuseport v1.5.0 github.com/kelseyhightower/envconfig v1.4.0 github.com/lyft/goruntime v0.3.0 - github.com/lyft/gostats v0.4.12 + github.com/lyft/gostats v0.4.13 github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index 1b595cfd..08660ed4 100644 --- a/go.sum +++ b/go.sum @@ -95,8 +95,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lyft/goruntime v0.3.0 h1:VLBYR4s3XazkUT8lLtq9CJrt58YmLQQumrK3ktenEkI= github.com/lyft/goruntime v0.3.0/go.mod h1:BW1gngSpMJR9P9w23BPUPdhdbUWhpirl98TQhOWWMF4= github.com/lyft/gostats v0.4.1/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= -github.com/lyft/gostats v0.4.12 h1:vaQMrsY4QH9GOeJUkZ7bHm8kqS92IhHuuwh7vTQ4qyQ= -github.com/lyft/gostats v0.4.12/go.mod h1:rMGud5RRaGYMG0KPS0GAUSBBs69yFMOMYjAnmcPTaG8= +github.com/lyft/gostats v0.4.13 h1:JWU47H0Kd1bgiLcI/mMIlagRJVa6U6nCbrN0GnFwzoc= +github.com/lyft/gostats v0.4.13/go.mod h1:cJWqEVL8JIewIJz/olUIios2F1q06Nc51hXejPQmBH0= github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M= github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= From 3fcc360931eaf90da9dfeaeb4504832c61d40304 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Apr 2024 12:34:24 -0700 Subject: [PATCH 110/181] Bump golang from 1.22.1 to 1.22.2 in /examples/xds-sotw-config-server (#561) Bumps golang from 1.22.1 to 1.22.2. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index 8b21a929..e6cfa02f 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.1@sha256:34ce21a9696a017249614876638ea37ceca13cdd88f582caad06f87a8aa45bf3 AS build +FROM golang:1.22.2@sha256:c4fb952e712efd8f787bcd8e53fd66d1d83b7dc26adabc218e9eac1dbf776bdf AS build WORKDIR /xds-server COPY . . 
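
A short aside on the `google.golang.org/protobuf` migration in PATCH 104/181 earlier in this series: the deprecated `github.com/golang/protobuf/ptypes/*` packages were thin aliases for the canonical `types/known/*` packages, so the swap is a pure import-path change with identical wire format. The sketch below is not part of the patch series; it only shows the post-migration API, and the 30-second duration is an arbitrary illustrative value.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// durationpb.Duration is the same generated message the old
	// ptypes/duration package aliased, so struct literals like the one in
	// utils.CalculateReset carry over unchanged.
	d := &durationpb.Duration{Seconds: 30}
	fmt.Println(d.AsDuration()) // prints "30s"

	// anypb.New packs any proto message, replacing the old ptypes helpers
	// around the ptypes/any alias used by the xDS provider.
	a, err := anypb.New(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration
}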
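
Likewise, the `GRPC_UDS` listener added in PATCH 105/181 can be exercised from any gRPC client via grpc-go's standard `unix:` target scheme, which mirrors the server's `net.Listen("unix", ...)` call. A hedged sketch follows, assuming the server was started with `GRPC_UDS=/tmp/ratelimit.sock`; the socket path, domain, and descriptor entries are illustrative assumptions, not values taken from the patches.

package main

import (
	"context"
	"log"
	"time"

	rlcommon "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The "unix:" resolver ships with grpc-go, so no custom dialer is
	// needed to reach a unix domain socket instead of host:port.
	conn, err := grpc.Dial("unix:///tmp/ratelimit.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Send a minimal ShouldRateLimit request; domain and descriptor values
	// here are placeholders for whatever the deployed config defines.
	resp, err := pb.NewRateLimitServiceClient(conn).ShouldRateLimit(ctx, &pb.RateLimitRequest{
		Domain: "test-domain",
		Descriptors: []*rlcommon.RateLimitDescriptor{
			{Entries: []*rlcommon.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}}},
		},
		HitsAddend: 1,
	})
	if err != nil {
		log.Fatalf("ShouldRateLimit: %v", err)
	}
	log.Printf("overall code: %v", resp.OverallCode)
}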
From ca55e1b31d023d8b8aedb8a21ca3b769fb632e95 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 5 May 2024 16:37:09 -0700 Subject: [PATCH 111/181] Bump actions/upload-artifact from 3.1.0 to 4.3.3 (#577) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.0 to 4.3.3. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/3cea5372237819ed00197afe530f5a7ea3e805c8...65462800fd760344b1a7b4382951275a0abb4808) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 5707c582..47d9ee62 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -56,7 +56,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: SARIF file path: results.sarif From aae80dc9b219f6be94b9ee4e77baab5ee69f73df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:48:47 -0600 Subject: [PATCH 112/181] Bump github.com/lyft/gostats from 0.4.13 to 0.4.14 (#609) Bumps [github.com/lyft/gostats](https://github.com/lyft/gostats) from 0.4.13 to 0.4.14. - [Release notes](https://github.com/lyft/gostats/releases) - [Commits](https://github.com/lyft/gostats/compare/v0.4.13...v0.4.14) --- updated-dependencies: - dependency-name: github.com/lyft/gostats dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 09d8dc10..f3330272 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/kavu/go_reuseport v1.5.0 github.com/kelseyhightower/envconfig v1.4.0 github.com/lyft/goruntime v0.3.0 - github.com/lyft/gostats v0.4.13 + github.com/lyft/gostats v0.4.14 github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index 08660ed4..bd12cce9 100644 --- a/go.sum +++ b/go.sum @@ -95,8 +95,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lyft/goruntime v0.3.0 h1:VLBYR4s3XazkUT8lLtq9CJrt58YmLQQumrK3ktenEkI= github.com/lyft/goruntime v0.3.0/go.mod h1:BW1gngSpMJR9P9w23BPUPdhdbUWhpirl98TQhOWWMF4= github.com/lyft/gostats v0.4.1/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= -github.com/lyft/gostats v0.4.13 h1:JWU47H0Kd1bgiLcI/mMIlagRJVa6U6nCbrN0GnFwzoc= -github.com/lyft/gostats v0.4.13/go.mod h1:cJWqEVL8JIewIJz/olUIios2F1q06Nc51hXejPQmBH0= +github.com/lyft/gostats v0.4.14 h1:xmP4yMfDvEKtlNZEcS2sYz0cvnps1ri337ZEEbw3ab8= +github.com/lyft/gostats v0.4.14/go.mod h1:cJWqEVL8JIewIJz/olUIios2F1q06Nc51hXejPQmBH0= github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M= github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= From b7792693d722b2137da82e35965dae32407381aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:49:03 -0600 Subject: [PATCH 113/181] Bump github/codeql-action from 3.24.10 to 3.25.7 (#608) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.24.10 to 3.25.7. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/4355270be187e1b672a7a1c7c7bae5afdc1ab94a...f079b8493333aace61c81488f8bd40919487bd9f) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecard.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index ab01275b..0bfedb1b 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -35,14 +35,14 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10 + uses: github/codeql-action/init@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10 + uses: github/codeql-action/autobuild@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10 + uses: github/codeql-action/analyze@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 47d9ee62..e48b6224 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4355270be187e1b672a7a1c7c7bae5afdc1ab94a # v3.24.10 + uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 with: sarif_file: results.sarif From e4fec80ee9ea5853c8f1f97a49f955dafd907ecc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:49:51 -0600 Subject: [PATCH 114/181] Bump go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc (#604) Bumps [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) from 1.21.0 to 1.27.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...v1.27.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 36 ++++++++++++------------ go.sum | 86 ++++++++++++++++++++++++++++------------------------------ 2 files changed, 59 insertions(+), 63 deletions(-) diff --git a/go.mod b/go.mod index f3330272..a21b8c3d 100644 --- a/go.mod +++ b/go.mod @@ -20,42 +20,42 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 - go.opentelemetry.io/otel v1.24.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 + go.opentelemetry.io/otel v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 - go.opentelemetry.io/otel/sdk v1.24.0 - go.opentelemetry.io/otel/trace v1.24.0 - golang.org/x/net v0.21.0 - google.golang.org/grpc v1.63.0 - google.golang.org/protobuf v1.33.0 + go.opentelemetry.io/otel/sdk v1.27.0 + go.opentelemetry.io/otel/trace v1.27.0 + golang.org/x/net v0.25.0 + google.golang.org/grpc v1.64.0 + google.golang.org/protobuf v1.34.1 gopkg.in/yaml.v2 v2.4.0 ) require ( + cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect + github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/proto/otlp v1.2.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index bd12cce9..2efa2b03 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ +cloud.google.com/go v0.26.0 
h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= -cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= +cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= +cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -14,8 +14,8 @@ github.com/alicebob/miniredis/v2 v2.31.0/go.mod h1:UB/T2Uztp7MlFSDakaX1sTXUv5CAS github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= @@ -27,8 +27,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= github.com/coocood/freecache v1.2.4 h1:UdR6Yz/X1HW4fZOuH0Z94KwG851GWOSknua5VUbb/5M= github.com/coocood/freecache v1.2.4/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -73,8 +73,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 
h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/kavu/go_reuseport v1.5.0 h1:UNuiY2OblcqAtVDE8Gsg1kZz8zbBWg907sP1ceBV+bk= @@ -106,8 +106,8 @@ github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795/go.mod h1 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -130,22 +130,22 @@ github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -172,11 +172,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -196,13 +196,13 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -221,26 +221,22 @@ golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSm golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 h1:AgADTJarZTBqgjiUzRgfaBchgYB3/WFTC80GPwsMcRI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.mod 
h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= -google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From e754e7ff575f4f92b09999357449e36504ee8c36 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:50:35 -0600 Subject: [PATCH 115/181] Bump alpine from `51b6726` to `77726ef` in /integration-test (#600) Bumps alpine from `51b6726` to `77726ef`. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- integration-test/Dockerfile.tester | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-test/Dockerfile.tester b/integration-test/Dockerfile.tester index 18979fd9..04724e84 100644 --- a/integration-test/Dockerfile.tester +++ b/integration-test/Dockerfile.tester @@ -1,4 +1,4 @@ -FROM alpine@sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48 +FROM alpine@sha256:77726ef6b57ddf65bb551896826ec38bc3e53f75cdde31354fbffb4f25238ebd USER root From 695c1ef3fdb11c79f9c0dd20cc6b09fb7dde3e47 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:51:21 -0600 Subject: [PATCH 116/181] Bump alpine from 3.16 to 3.20 in /examples/xds-sotw-config-server (#599) Bumps alpine from 3.16 to 3.20. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index e6cfa02f..999fc4d2 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -5,7 +5,7 @@ COPY . . 
RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/xds-server -v main/main.go -FROM alpine:3.16@sha256:e4cdb7d47b06ba0a062ad2a97a7d154967c8f83934594d9f2bd3efa89292996b AS final +FROM alpine:3.20@sha256:77726ef6b57ddf65bb551896826ec38bc3e53f75cdde31354fbffb4f25238ebd AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/xds-server /bin/xds-server ENTRYPOINT [ "/bin/xds-server" ] From 0f73f8904eff2eef749ae1cf1c16be768c05b136 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:51:37 -0600 Subject: [PATCH 117/181] Bump alpine from 3.18.6 to 3.20.0 (#598) Bumps alpine from 3.18.6 to 3.20.0. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 8f095670..b5b02bec 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.18.6@sha256:11e21d7b981a59554b3f822c49f6e9f57b6068bb74f49c4cd5cc4c663c7e5160 AS final +FROM alpine:3.20.0@sha256:77726ef6b57ddf65bb551896826ec38bc3e53f75cdde31354fbffb4f25238ebd AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit From f17d88fb32686c167a1d627aab5f32988d93ef52 Mon Sep 17 00:00:00 2001 From: jespersoderlund Date: Fri, 7 Jun 2024 06:53:17 +0200 Subject: [PATCH 118/181] =?UTF-8?q?Including=20the=20name=20in=20the=20rat?= =?UTF-8?q?elimit=20descriptor=20so=20that=20it=20is=20returned=E2=80=A6?= =?UTF-8?q?=20(#596)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: jespersoderlund --- src/config/config_impl.go | 1 + test/config/basic_config.yaml | 1 + test/config/config_test.go | 2 ++ test/service/ratelimit_test.go | 2 +- 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 0c4152a6..df0e473d 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -85,6 +85,7 @@ func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Un Limit: &pb.RateLimitResponse_RateLimit{ RequestsPerUnit: requestsPerUnit, Unit: unit, + Name: name, }, Unlimited: unlimited, ShadowMode: shadowMode, diff --git a/test/config/basic_config.yaml b/test/config/basic_config.yaml index 1ce7c9af..8992966f 100644 --- a/test/config/basic_config.yaml +++ b/test/config/basic_config.yaml @@ -42,6 +42,7 @@ descriptors: - key: key4 rate_limit: + name: key4_rate_limit unit: day requests_per_unit: 1 diff --git a/test/config/config_test.go b/test/config/config_test.go index bceaa2e6..eb52974f 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -149,6 +149,7 @@ func TestBasicConfig(t *testing.T) { rl.Stats.NearLimit.Inc() rl.Stats.WithinLimit.Inc() assert.EqualValues(1, rl.Limit.RequestsPerUnit) + assert.Empty(rl.Limit.Name, "No name provided in config") assert.Equal(pb.RateLimitResponse_RateLimit_HOUR, rl.Limit.Unit) assert.EqualValues(1, stats.NewCounter("test-domain.key3.total_hits").Value()) assert.EqualValues(1, stats.NewCounter("test-domain.key3.over_limit").Value()) @@ -165,6 +166,7 @@ 
func TestBasicConfig(t *testing.T) { rl.Stats.NearLimit.Inc() rl.Stats.WithinLimit.Inc() assert.EqualValues(1, rl.Limit.RequestsPerUnit) + assert.EqualValues("key4_rate_limit", rl.Limit.Name, "Name provided in config") assert.Equal(pb.RateLimitResponse_RateLimit_DAY, rl.Limit.Unit) assert.EqualValues(1, stats.NewCounter("test-domain.key4.total_hits").Value()) assert.EqualValues(1, stats.NewCounter("test-domain.key4.over_limit").Value()) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 4d2cde53..18c3cafc 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -153,7 +153,7 @@ func TestService(test *testing.T) { request = common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "key_name", nil, false), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) From 2b7813ebae229d486d2ae2e0bb1bcc48e7cf9606 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:54:24 -0600 Subject: [PATCH 119/181] Bump golang.org/x/net from 0.25.0 to 0.26.0 (#611) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.25.0 to 0.26.0. - [Commits](https://github.com/golang/net/compare/v0.25.0...v0.26.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index a21b8c3d..40b7cbfb 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 go.opentelemetry.io/otel/sdk v1.27.0 go.opentelemetry.io/otel/trace v1.27.0 - golang.org/x/net v0.25.0 + golang.org/x/net v0.26.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.1 gopkg.in/yaml.v2 v2.4.0 @@ -52,8 +52,8 @@ require ( github.com/yuin/gopher-lua v1.1.1 // indirect go.opentelemetry.io/otel/metric v1.27.0 // indirect go.opentelemetry.io/proto/otlp v1.2.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect diff --git a/go.sum b/go.sum index 2efa2b03..6174f3c2 100644 --- a/go.sum +++ b/go.sum @@ -172,8 +172,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= 
-golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -196,13 +196,13 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= From 470a044e02e5c49ce969276a33af449b33f6ea41 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:54:44 -0600 Subject: [PATCH 120/181] Bump google.golang.org/grpc in /examples/xds-sotw-config-server (#591) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.63.0 to 1.64.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.63.0...v1.64.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/go.mod | 13 ++++++------- examples/xds-sotw-config-server/go.sum | 26 ++++++++++++-------------- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index 58979aef..462c1b6f 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -4,20 +4,19 @@ go 1.21.5 require ( github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b - google.golang.org/grpc v1.63.0 + google.golang.org/grpc v1.64.0 ) require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect + github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/net v0.22.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect google.golang.org/protobuf v1.33.0 // indirect ) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum index 6c7656be..d3d08558 100644 --- a/examples/xds-sotw-config-server/go.sum +++ b/examples/xds-sotw-config-server/go.sum @@ -1,7 +1,7 @@ github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b h1:M0BhcNaW04UV1haQO8IFSDB64dAeiBSsTMZks/sYDcQ= @@ -18,20 +18,18 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/sys v0.17.0 
h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= -google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= -google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 573747dfe458a9bac2b8bde214ef013aa622b3f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:54:58 -0600 Subject: [PATCH 121/181] Bump ossf/scorecard-action from 2.3.1 to 2.3.3 (#590) Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.3.1 to 2.3.3. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/0864cf19026789058feabb7e87baa5f140aac736...dc50aa9510b46c811795eb24b2f1ba02a914e534) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index e48b6224..40d0613c 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -34,7 +34,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3 with: results_file: results.sarif results_format: sarif From 23943f9da45d4e971ccbb3927ae048880b760795 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:55:20 -0600 Subject: [PATCH 122/181] Bump actions/checkout from 4.1.2 to 4.1.6 (#589) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.2 to 4.1.6. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/9bb56186c3b09b4f86b1c65136769dd318469633...a5ac7e51b41094c92402da3b24376905380afc29) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 2 +- .github/workflows/main.yaml | 6 +++--- .github/workflows/pullrequest.yaml | 6 +++--- .github/workflows/release.yaml | 4 ++-- .github/workflows/scorecard.yml | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 0bfedb1b..9a68c5a0 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Install Go uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 75d63c91..936d86c2 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -12,14 +12,14 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Set up QEMU uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 4b24a25f..23a4a380 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest 
steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: check format run: make check_format @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: build and test run: make docker_tests @@ -29,7 +29,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 700b442c..0ae756ff 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,13 +12,13 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Set up QEMU uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 40d0613c..11130553 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -29,7 +29,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 with: persist-credentials: false From e34bcd0caf7425622ad0b5cc69c263a39ce1c028 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:56:22 -0600 Subject: [PATCH 123/181] Bump golang from 1.22.2 to 1.22.3 in /examples/xds-sotw-config-server (#583) Bumps golang from 1.22.2 to 1.22.3. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index 999fc4d2..2fff1497 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.2@sha256:c4fb952e712efd8f787bcd8e53fd66d1d83b7dc26adabc218e9eac1dbf776bdf AS build +FROM golang:1.22.3@sha256:b1e05e2c918f52c59d39ce7d5844f73b2f4511f7734add8bb98c9ecdd4443365 AS build WORKDIR /xds-server COPY . . From 0e031768b5895f25f81d03f143578fbe2e16242a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 22:56:41 -0600 Subject: [PATCH 124/181] Bump actions/setup-go from 5.0.0 to 5.0.1 (#578) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.0 to 5.0.1. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/0c52d547c9bc32b1aa3301fd7a9cb496313a4491...cdcb36043654635271a94b9a6d1392de5bb323a7) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 2 +- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 9a68c5a0..12d4fc25 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -29,7 +29,7 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Install Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version-file: go.mod diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 936d86c2..c1f9b7ba 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -47,7 +47,7 @@ jobs: with: python-version: "3.9" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version: "1.21.5" diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 23a4a380..e6b7edd5 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -35,7 +35,7 @@ jobs: with: python-version: "3.9" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version: "1.21.5" From 71e2eb7918ef956507efbb915d4581cb8e24382c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 7 Jun 2024 07:55:06 -0600 Subject: [PATCH 125/181] Bump golang from 1.22.2 to 1.22.3 (#584) Bumps golang from 1.22.2 to 1.22.3. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.integration | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index b5b02bec..275fb12c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.2@sha256:c4fb952e712efd8f787bcd8e53fd66d1d83b7dc26adabc218e9eac1dbf776bdf AS build +FROM golang:1.22.3@sha256:b1e05e2c918f52c59d39ce7d5844f73b2f4511f7734add8bb98c9ecdd4443365 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index cdce0d60..ed162cfc 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. 
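# It installs redis, memcached and stunnel so the suite can run against real local backends (stunnel supplies TLS tunnels for the TLS test paths).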
-FROM golang@sha256:c4fb952e712efd8f787bcd8e53fd66d1d83b7dc26adabc218e9eac1dbf776bdf +FROM golang@sha256:b1e05e2c918f52c59d39ce7d5844f73b2f4511f7734add8bb98c9ecdd4443365 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* From be31f86e67e3d81a2800ce5da811965d1081b870 Mon Sep 17 00:00:00 2001 From: Ian Kerins Date: Fri, 7 Jun 2024 09:55:31 -0400 Subject: [PATCH 126/181] Add TLS support to memcache (#594) Memcache supports TLS connections for client-server communication. Add support for configuring a TLS connection to memcached using TLS in a way that mirrors the existing redis configuration options. Also, ensure that configuration options that apply to the "normal" enumeration of host:port pairs also apply to the DNS SRV discovery mechanism. I don't see a reason why they should not. Signed-off-by: Ian Kerins --- README.md | 3 +++ src/memcached/cache_impl.go | 19 +++++++++++++++---- src/settings/settings.go | 22 +++++++++++++++++++--- 3 files changed, 37 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 793e7607..af65117c 100644 --- a/README.md +++ b/README.md @@ -1008,6 +1008,9 @@ To configure a Memcache instance use the following environment variables instead 1. `BACKEND_TYPE=memcache` 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys 1. `MEMCACHE_MAX_IDLE_CONNS=2`: the maximum number of idle TCP connections per memcache node, `2` is the default of the underlying library +1. `MEMCACHE_TLS`: set to `"true"` to connect to the server with TLS. +1. `MEMCACHE_TLS_CLIENT_CERT`, `MEMCACHE_TLS_CLIENT_KEY`, and `MEMCACHE_TLS_CACERT` to provide files that parameterize the memcache client TLS connection configuration. +1. `MEMCACHE_TLS_SKIP_HOSTNAME_VERIFICATION` set to `"true"` will skip hostname verification in environments where the certificate has an invalid hostname. With memcache mode increments will happen asynchronously, so it's technically possible for a client to exceed quota briefly if multiple requests happen at exactly the same time. diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index a79451df..3b887a60 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -17,7 +17,9 @@ package memcached import ( "context" + "crypto/tls" "math/rand" + "net" "strconv" "sync" "time" @@ -221,7 +223,7 @@ func refreshServers(serverList *memcache.ServerList, srv string, resolver srv.Sr return nil } -func newMemcachedFromSrv(srv string, d time.Duration, resolver srv.SrvResolver) Client { +func newMemcachedFromSrv(srv string, d time.Duration, resolver srv.SrvResolver) *memcache.Client { serverList := new(memcache.ServerList) err := refreshServers(serverList, srv, resolver) if err != nil { @@ -245,13 +247,22 @@ func newMemcacheFromSettings(s settings.Settings) Client { if s.MemcacheSrv != "" && len(s.MemcacheHostPort) > 0 { panic(MemcacheError("Both MEMCADHE_HOST_PORT and MEMCACHE_SRV are set")) } + var client *memcache.Client if s.MemcacheSrv != "" { logger.Debugf("Using MEMCACHE_SRV: %v", s.MemcacheSrv) - return newMemcachedFromSrv(s.MemcacheSrv, s.MemcacheSrvRefresh, new(srv.DnsSrvResolver)) + client = newMemcachedFromSrv(s.MemcacheSrv, s.MemcacheSrvRefresh, new(srv.DnsSrvResolver)) + } else { + logger.Debugf("Using MEMCACHE_HOST_PORT: %v", s.MemcacheHostPort) + client = memcache.New(s.MemcacheHostPort...) } - logger.Debugf("Usng MEMCACHE_HOST_PORT:: %v", s.MemcacheHostPort) - client := memcache.New(s.MemcacheHostPort...) 
client.MaxIdleConns = s.MemcacheMaxIdleConns + if s.MemcacheTls { + client.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) { + var td tls.Dialer + td.Config = s.MemcacheTlsConfig + return td.DialContext(ctx, network, address) + } + } return client } diff --git a/src/settings/settings.go b/src/settings/settings.go index 695c75aa..a8e1d7c5 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -157,9 +157,15 @@ type Settings struct { // number of connections to memcache kept idle in pool, if a connection is needed but none // are idle a new connection is opened, used and closed and can be left in a time-wait state // which can result in high CPU usage. - MemcacheMaxIdleConns int `envconfig:"MEMCACHE_MAX_IDLE_CONNS" default:"2"` - MemcacheSrv string `envconfig:"MEMCACHE_SRV" default:""` - MemcacheSrvRefresh time.Duration `envconfig:"MEMCACHE_SRV_REFRESH" default:"0"` + MemcacheMaxIdleConns int `envconfig:"MEMCACHE_MAX_IDLE_CONNS" default:"2"` + MemcacheSrv string `envconfig:"MEMCACHE_SRV" default:""` + MemcacheSrvRefresh time.Duration `envconfig:"MEMCACHE_SRV_REFRESH" default:"0"` + MemcacheTls bool `envconfig:"MEMCACHE_TLS" default:"false"` + MemcacheTlsConfig *tls.Config + MemcacheTlsClientCert string `envconfig:"MEMCACHE_TLS_CLIENT_CERT" default:""` + MemcacheTlsClientKey string `envconfig:"MEMCACHE_TLS_CLIENT_KEY" default:""` + MemcacheTlsCACert string `envconfig:"MEMCACHE_TLS_CACERT" default:""` + MemcacheTlsSkipHostnameVerification bool `envconfig:"MEMCACHE_TLS_SKIP_HOSTNAME_VERIFICATION" default:"false"` // Should the ratelimiting be running in Global shadow-mode, ie. never report a ratelimit status, unless a rate was provided from envoy as an override GlobalShadowMode bool `envconfig:"SHADOW_MODE" default:"false"` @@ -188,6 +194,7 @@ func NewSettings() Settings { } // When we require TLS to connect to Redis, we check if we need to connect using the provided key-pair. 
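+	// The memcache TLS config below is derived the same way, from the MEMCACHE_TLS_* variables.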
RedisTlsConfig(s.RedisTls || s.RedisPerSecondTls)(&s) + MemcacheTlsConfig(s.MemcacheTls)(&s) GrpcServerTlsConfig()(&s) ConfigGrpcXdsServerTlsConfig()(&s) return s @@ -205,6 +212,15 @@ func RedisTlsConfig(redisTls bool) Option { } } +func MemcacheTlsConfig(memcacheTls bool) Option { + return func(s *Settings) { + s.MemcacheTlsConfig = &tls.Config{} + if memcacheTls { + s.MemcacheTlsConfig = utils.TlsConfigFromFiles(s.MemcacheTlsClientCert, s.MemcacheTlsClientKey, s.MemcacheTlsCACert, utils.ServerCA, s.MemcacheTlsSkipHostnameVerification) + } + } +} + func GrpcServerTlsConfig() Option { return func(s *Settings) { if s.GrpcServerUseTLS { From c5ac0f0bd7c549ef7ff8f7fcdf0dc9c00eb2970d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jhonn=20W=2E=20Fraz=C3=A3o?= Date: Sun, 9 Jun 2024 22:06:12 -0300 Subject: [PATCH 127/181] fix(redis): use logger instead of println (#606) Signed-off-by: frazao.jhonn --- src/redis/driver_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index f68addda..5f70bad7 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -42,7 +42,7 @@ func poolTrace(ps *poolStats, healthCheckActiveConnection bool, srv server.Serve } } } else { - fmt.Println("creating redis connection error :", newConn.Err) + logger.Errorf("creating redis connection error : %v", newConn.Err) } }, ConnClosed: func(_ trace.PoolConnClosed) { From 0895db5f515a1c10f979e62eeaefcb038e24c424 Mon Sep 17 00:00:00 2001 From: Josh Date: Thu, 13 Jun 2024 21:28:00 -0500 Subject: [PATCH 128/181] Initial integration of DogStatsD (#585) * Initial integration of DogStatsD including something called mogrifiers Signed-off-by: Josh Jaques * fix style errors in doc Signed-off-by: Josh Jaques * Fix variable name in README Signed-off-by: Josh Jaques * Add validations and test cases Signed-off-by: Josh Jaques * Amendment with improvements - handle out of bounds match in pattern handler - make it an error if both statsd sink are enabled - improve error wording - add more test cases Signed-off-by: Josh Jaques * fix incorrect timer manipulation Signed-off-by: Josh Jaques --------- Signed-off-by: Josh Jaques --- README.md | 54 +++++++ go.mod | 2 + go.sum | 13 ++ src/godogstats/dogstatsd_sink.go | 82 +++++++++++ src/godogstats/mogrifier_map.go | 107 ++++++++++++++ src/godogstats/mogrifier_map_test.go | 202 +++++++++++++++++++++++++++ src/service_cmd/runner/runner.go | 15 ++ src/settings/settings.go | 14 +- 8 files changed, 483 insertions(+), 6 deletions(-) create mode 100644 src/godogstats/dogstatsd_sink.go create mode 100644 src/godogstats/mogrifier_map.go create mode 100644 src/godogstats/mogrifier_map_test.go diff --git a/README.md b/README.md index af65117c..4189bb06 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,9 @@ - [Statistics](#statistics) - [Statistics](#statistics-1) - [Statistics options](#statistics-options) + - [DogStatsD](#dogstatsd) + - [Example](#example) + - [Continued example:](#continued-example) - [HTTP Port](#http-port) - [/json endpoint](#json-endpoint) - [Debug Port](#debug-port) @@ -846,6 +849,57 @@ ratelimit.service.rate_limit.messaging.auth-service.over_limit.shadow_mode: 1 1. `EXTRA_TAGS`: set to `","` to tag all emitted stats with the provided tags. You might want to tag build commit or release version, for example. +## DogStatsD + +To enable dogstatsd integration set: + +1. 
`USE_DOG_STATSD`: `true` to use [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go)
+
+DogStatsD also enables so-called `mogrifiers`, which
+rewrite a traditional dotted stat name into a combination of a stat name and tags.
+
+To enable mogrifiers, set a comma-separated list of them in `USE_DOG_STATSD_MOGRIFIERS`.
+
+e.g. `USE_DOG_STATSD_MOGRIFIERS`: `FOO,BAR`
+
+For each mogrifier, define variables that declare the mogrification:
+
+1. `DOG_STATSD_MOGRIFIER_%s_PATTERN`: The regex pattern to match on.
+2. `DOG_STATSD_MOGRIFIER_%s_NAME`: The name of the metric to emit. Can contain variables.
+3. `DOG_STATSD_MOGRIFIER_%s_TAGS`: Comma-separated list of tags to emit. Can contain variables.
+
+Variables within mogrifiers are strings such as `$1`, `$2`, `$3`, which reference
+a match group from the regex pattern.
+
+### Example
+
+In the example below we set a mogrifier named `DOMAIN` to rewrite
+`some.original.metric.TAG` to `some.original.metric` with the tag `domain:TAG`.
+
+First, enable a single mogrifier:
+
+1. `USE_DOG_STATSD_MOGRIFIERS`: `DOMAIN`
+
+Then, declare the rules for the `DOMAIN` mogrifier:
+
+1. `DOG_STATSD_MOGRIFIER_DOMAIN_PATTERN`: `^some\.original\.metric\.(.*)$`
+2. `DOG_STATSD_MOGRIFIER_DOMAIN_NAME`: `some.original.metric`
+3. `DOG_STATSD_MOGRIFIER_DOMAIN_TAGS`: `domain:$1`
+
+### Continued example:
+
+Let's also add another mogrifier that emits the hits metrics with `domain` and `descriptor` tags.
+
+First, enable an extra mogrifier:
+
+1. `USE_DOG_STATSD_MOGRIFIERS`: `DOMAIN,HITS`
+
+Then, declare the rules for the `HITS` mogrifier:
+
+1. `DOG_STATSD_MOGRIFIER_HITS_PATTERN`: `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`
+2. `DOG_STATSD_MOGRIFIER_HITS_NAME`: `ratelimit.service.rate_limit.$3`
+3. 
`DOG_STATSD_MOGRIFIER_HITS_TAGS`: `domain:$1,descriptor:$2` + # HTTP Port The ratelimit service listens to HTTP 1.1 (by default on port 8080) with two endpoints: diff --git a/go.mod b/go.mod index 40b7cbfb..ebe4169c 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/envoyproxy/ratelimit go 1.21.5 require ( + github.com/DataDog/datadog-go/v5 v5.5.0 github.com/alicebob/miniredis/v2 v2.31.0 github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 github.com/coocood/freecache v1.2.4 @@ -34,6 +35,7 @@ require ( require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Microsoft/go-winio v0.5.0 // indirect github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect diff --git a/go.sum b/go.sum index 6174f3c2..209ed510 100644 --- a/go.sum +++ b/go.sum @@ -5,7 +5,11 @@ cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//u cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0= +github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= @@ -101,6 +105,7 @@ github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcM github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 h1:pH+U6pJP0BhxqQ4njBUjOg0++WMMvv3eByWzB+oATBY= github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -110,16 +115,22 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -189,9 +200,11 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/src/godogstats/dogstatsd_sink.go b/src/godogstats/dogstatsd_sink.go new file mode 100644 index 00000000..8632627f --- /dev/null +++ b/src/godogstats/dogstatsd_sink.go @@ -0,0 +1,82 @@ +package godogstats + +import ( + "regexp" + "strconv" + "time" + + "github.com/DataDog/datadog-go/v5/statsd" + gostats "github.com/lyft/gostats" +) + +type godogStatsSink struct { + client *statsd.Client + config struct { + host string + port int + } + + mogrifier mogrifierMap +} + +// ensure that godogStatsSink implements gostats.Sink +var _ gostats.Sink = (*godogStatsSink)(nil) + +type goDogStatsSinkOption func(*godogStatsSink) + +func WithStatsdHost(host string) goDogStatsSinkOption { + return func(g *godogStatsSink) { + g.config.host = host + } +} + +func WithStatsdPort(port int) goDogStatsSinkOption { + 
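+	// NewSink joins this with the host as "host:port" when constructing the
+	// underlying statsd client; the runner wires it from STATSD_PORT (default 8125).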
return func(g *godogStatsSink) { + g.config.port = port + } +} + +func WithMogrifier(mogrifiers map[*regexp.Regexp]func([]string) (string, []string)) goDogStatsSinkOption { + return func(g *godogStatsSink) { + g.mogrifier = mogrifiers + } +} + +func WithMogrifierFromEnv(keys []string) goDogStatsSinkOption { + return func(g *godogStatsSink) { + mogrifier, err := newMogrifierMapFromEnv(keys) + if err != nil { + panic(err) + } + g.mogrifier = mogrifier + } +} + +func NewSink(opts ...goDogStatsSinkOption) (*godogStatsSink, error) { + sink := &godogStatsSink{} + for _, opt := range opts { + opt(sink) + } + client, err := statsd.New(sink.config.host+":"+strconv.Itoa(sink.config.port), statsd.WithoutClientSideAggregation()) + if err != nil { + return nil, err + } + sink.client = client + return sink, nil +} + +func (g *godogStatsSink) FlushCounter(name string, value uint64) { + name, tags := g.mogrifier.mogrify(name) + g.client.Count(name, int64(value), tags, 1.0) +} + +func (g *godogStatsSink) FlushGauge(name string, value uint64) { + name, tags := g.mogrifier.mogrify(name) + g.client.Gauge(name, float64(value), tags, 1.0) +} + +func (g *godogStatsSink) FlushTimer(name string, milliseconds float64) { + name, tags := g.mogrifier.mogrify(name) + duration := time.Duration(milliseconds) * time.Millisecond + g.client.Timing(name, duration, tags, 1.0) +} diff --git a/src/godogstats/mogrifier_map.go b/src/godogstats/mogrifier_map.go new file mode 100644 index 00000000..78d8ea0e --- /dev/null +++ b/src/godogstats/mogrifier_map.go @@ -0,0 +1,107 @@ +package godogstats + +import ( + "fmt" + "regexp" + "strconv" + + "github.com/kelseyhightower/envconfig" +) + +var varFinder = regexp.MustCompile(`\$\d+`) // matches $0, $1, etc. + +const envPrefix = "DOG_STATSD_MOGRIFIER" // prefix for environment variables + +// mogrifierMap is a map of regular expressions to functions that mogrify a name and return tags +type mogrifierMap map[*regexp.Regexp]func([]string) (string, []string) + +// makePatternHandler returns a function that replaces $0, $1, etc. in the pattern with the corresponding match +func makePatternHandler(pattern string) func([]string) string { + return func(matches []string) string { + return varFinder.ReplaceAllStringFunc(pattern, func(s string) string { + i, err := strconv.Atoi(s[1:]) + if i >= len(matches) || err != nil { + // Return the original placeholder if the index is out of bounds + // or the Atoi fails, though given the varFinder regex it should + // not be possible. 
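+			// For example, a NAME or TAGS value that references "$5" against a
+			// pattern with only three capture groups keeps the literal "$5" in
+			// the emitted metric.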
+ return s + } + return matches[i] + }) + } +} + +// newMogrifierMapFromEnv loads mogrifiers from environment variables +// keys is a list of mogrifier names to load +func newMogrifierMapFromEnv(keys []string) (mogrifierMap, error) { + mogrifiers := mogrifierMap{} + + type config struct { + Pattern string `envconfig:"PATTERN"` + Tags map[string]string `envconfig:"TAGS"` + Name string `envconfig:"NAME"` + } + + for _, mogrifier := range keys { + cfg := config{} + if err := envconfig.Process(envPrefix+"_"+mogrifier, &cfg); err != nil { + return nil, fmt.Errorf("failed to load mogrifier %s: %v", mogrifier, err) + } + + if cfg.Pattern == "" { + return nil, fmt.Errorf("no PATTERN specified for mogrifier %s", mogrifier) + } + + re, err := regexp.Compile(cfg.Pattern) + if err != nil { + return nil, fmt.Errorf("failed to compile pattern for %s: %s: %v", mogrifier, cfg.Pattern, err) + } + + if cfg.Name == "" { + return nil, fmt.Errorf("no NAME specified for mogrifier %s", mogrifier) + } + + nameHandler := makePatternHandler(cfg.Name) + tagHandlers := make(map[string]func([]string) string, len(cfg.Tags)) + for key, value := range cfg.Tags { + if key == "" { + return nil, fmt.Errorf("no key specified for tag %s for mogrifier %s", key, mogrifier) + } + tagHandlers[key] = makePatternHandler(value) + if value == "" { + return nil, fmt.Errorf("no value specified for tag %s for mogrifier %s", key, mogrifier) + } + } + + mogrifiers[re] = func(matches []string) (string, []string) { + name := nameHandler(matches) + tags := make([]string, 0, len(tagHandlers)) + for tagKey, handler := range tagHandlers { + tagValue := handler(matches) + tags = append(tags, tagKey+":"+tagValue) + } + return name, tags + } + + } + return mogrifiers, nil +} + +// mogrify applies the first mogrifier in the map that matches the name +func (m mogrifierMap) mogrify(name string) (string, []string) { + if m == nil { + return name, nil + } + for matcher, mogrifier := range m { + matches := matcher.FindStringSubmatch(name) + if len(matches) == 0 { + continue + } + + mogrifiedName, tags := mogrifier(matches) + return mogrifiedName, tags + } + + // no mogrification + return name, nil +} diff --git a/src/godogstats/mogrifier_map_test.go b/src/godogstats/mogrifier_map_test.go new file mode 100644 index 00000000..ec8eace3 --- /dev/null +++ b/src/godogstats/mogrifier_map_test.go @@ -0,0 +1,202 @@ +package godogstats + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/assert" +) + +func testMogrifier() mogrifierMap { + return mogrifierMap{ + regexp.MustCompile(`^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`): func(matches []string) (string, []string) { + name := "ratelimit.service.rate_limit." 
+ matches[3] + tags := []string{"domain:" + matches[1], "descriptor:" + matches[2]} + return name, tags + }, + } +} + +func TestMogrify(t *testing.T) { + m := testMogrifier() + // Test case 1 + name1 := "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit" + expectedMogrifiedName1 := "ratelimit.service.rate_limit.within_limit" + expectedTags1 := []string{"domain:mongo_cps", "descriptor:database_users"} + mogrifiedName1, tags1 := m.mogrify(name1) + assert.Equal(t, expectedMogrifiedName1, mogrifiedName1) + assert.Equal(t, expectedTags1, tags1) +} + +func TestEmpty(t *testing.T) { + m := mogrifierMap{} + name, tags := m.mogrify("ratelimit.service.rate_limit.mongo_cps.database_users.within_limit") + assert.Equal(t, "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", name) + assert.Empty(t, tags) +} + +func TestNil(t *testing.T) { + var m mogrifierMap + name, tags := m.mogrify("ratelimit.service.rate_limit.mongo_cps.database_users.within_limit") + assert.Equal(t, "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", name) + assert.Empty(t, tags) +} + +func TestLoadMogrifiersFromEnv(t *testing.T) { + tests := []struct { + name string + envVars map[string]string + input string + expectOutput string + expectedTags []string + keys []string + }{ + { + name: "Simple replacement", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_TAG_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_TAG_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_TAG_TAGS": "domain:$1,descriptor:$2", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", + expectOutput: "ratelimit.service.rate_limit.within_limit", + expectedTags: []string{"domain:mongo_cps", "descriptor:database_users"}, + keys: []string{"TAG"}, + }, + { + name: "Out of bounds index", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_TAG_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_TAG_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_TAG_TAGS": "domain:$1,descriptor:$5", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", + expectOutput: "ratelimit.service.rate_limit.within_limit", + expectedTags: []string{"domain:mongo_cps", "descriptor:$5"}, + keys: []string{"TAG"}, + }, + { + name: "No placeholders in tags", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_TAG_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_TAG_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_TAG_TAGS": "domain:mongo_cps,descriptor:database_users", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", + expectOutput: "ratelimit.service.rate_limit.within_limit", + expectedTags: []string{"domain:mongo_cps", "descriptor:database_users"}, + keys: []string{"TAG"}, + }, + { + name: "No matches", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_TAG_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_TAG_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_TAG_TAGS": "domain:$1,descriptor:$4", + }, + input: "some.unmatched.metric", + expectOutput: "some.unmatched.metric", + keys: []string{"TAG"}, + }, + { + name: "Two mogrifiers: First match", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_SPECIFIC_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.foo$`, + "DOG_STATSD_MOGRIFIER_SPECIFIC_NAME": "custom.foo", + 
"DOG_STATSD_MOGRIFIER_SPECIFIC_TAGS": "domain:$1,descriptor:$2", + "DOG_STATSD_MOGRIFIER_WILDCARD_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_WILDCARD_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_WILDCARD_TAGS": "domain:$1,descriptor:$2", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.foo", + expectOutput: "custom.foo", + expectedTags: []string{"domain:mongo_cps", "descriptor:database_users"}, + keys: []string{"SPECIFIC", "WILDCARD"}, + }, + { + name: "Two mogrifiers: second match", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_SPECIFIC_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.foo$`, + "DOG_STATSD_MOGRIFIER_SPECIFIC_NAME": "custom.foo", + "DOG_STATSD_MOGRIFIER_SPECIFIC_TAGS": "domain:$1,descriptor:$2", + "DOG_STATSD_MOGRIFIER_WILDCARD_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_WILDCARD_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_WILDCARD_TAGS": "domain:$1,descriptor:$2", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", + expectOutput: "ratelimit.service.rate_limit.within_limit", + expectedTags: []string{"domain:mongo_cps", "descriptor:database_users"}, + keys: []string{"SPECIFIC", "WILDCARD"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set environment variables + for key, value := range tt.envVars { + t.Setenv(key, value) + } + + mogrifiers, err := newMogrifierMapFromEnv(tt.keys) + assert.NoError(t, err) + assert.NotNil(t, mogrifiers) + assert.Len(t, mogrifiers, len(tt.keys)) + + name, tags := mogrifiers.mogrify(tt.input) + assert.Equal(t, tt.expectOutput, name) + assert.ElementsMatch(t, tt.expectedTags, tags) + }) + } +} + +func TestValidation(t *testing.T) { + t.Run("No settings will fail", func(t *testing.T) { + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("EmptyPattern", func(t *testing.T) { + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", "") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "ratelimit.service.rate_limit.$3") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", "domain:$1,descriptor:$2") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("EmptyName", func(t *testing.T) { + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`) + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", "domain:$1,descriptor:$2") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("EmptyTagKey", func(t *testing.T) { + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`) + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "ratelimit.service.rate_limit.$3") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", ":5") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("EmptyTagValue", func(t *testing.T) { + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`) + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "ratelimit.service.rate_limit.$3") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", "domain:$1,descriptor:") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("Success w/ No mogrifiers", func(t *testing.T) { + _, err := newMogrifierMapFromEnv([]string{}) + assert.NoError(t, err) + }) + + t.Run("Success w/ mogrifier", func(t 
*testing.T) { + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`) + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "ratelimit.service.rate_limit.$3") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", "domain:$1,descriptor:$2") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.NoError(t, err) + }) +} diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index f196ea4f..f645e58f 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "github.com/envoyproxy/ratelimit/src/godogstats" "github.com/envoyproxy/ratelimit/src/metrics" "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/trace" @@ -43,6 +44,20 @@ func NewRunner(s settings.Settings) Runner { if s.DisableStats { logger.Info("Stats disabled") store = gostats.NewStore(gostats.NewNullSink(), false) + } else if s.UseDogStatsd { + if s.UseStatsd { + logger.Fatalf("Error: unable to use both stats sink at the same time. Set either USE_DOG_STATSD or USE_STATSD but not both.") + } + var err error + sink, err := godogstats.NewSink( + godogstats.WithStatsdHost(s.StatsdHost), + godogstats.WithStatsdPort(s.StatsdPort), + godogstats.WithMogrifierFromEnv(s.UseDogStatsdMogrifiers)) + if err != nil { + logger.Fatalf("Failed to create dogstatsd sink: %v", err) + } + logger.Info("Stats initialized for dogstatsd") + store = gostats.NewStore(sink, false) } else if s.UseStatsd { logger.Info("Stats initialized for statsd") store = gostats.NewStore(gostats.NewTCPStatsdSink(gostats.WithStatsdHost(s.StatsdHost), gostats.WithStatsdPort(s.StatsdPort)), false) diff --git a/src/settings/settings.go b/src/settings/settings.go index a8e1d7c5..49da0ea5 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -79,12 +79,14 @@ type Settings struct { XdsClientBackoffJitter bool `envconfig:"XDS_CLIENT_BACKOFF_JITTER" default:"true"` // Stats-related settings - UseStatsd bool `envconfig:"USE_STATSD" default:"true"` - StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` - StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` - ExtraTags map[string]string `envconfig:"EXTRA_TAGS" default:""` - StatsFlushInterval time.Duration `envconfig:"STATS_FLUSH_INTERVAL" default:"10s"` - DisableStats bool `envconfig:"DISABLE_STATS" default:"false"` + UseDogStatsd bool `envconfig:"USE_DOG_STATSD" default:"false"` + UseDogStatsdMogrifiers []string `envconfig:"USE_DOG_STATSD_MOGRIFIERS" default:""` + UseStatsd bool `envconfig:"USE_STATSD" default:"true"` + StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` + StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` + ExtraTags map[string]string `envconfig:"EXTRA_TAGS" default:""` + StatsFlushInterval time.Duration `envconfig:"STATS_FLUSH_INTERVAL" default:"10s"` + DisableStats bool `envconfig:"DISABLE_STATS" default:"false"` // Settings for rate limit configuration RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` From cc77e9514e7195fd401d8f4fe9c51aa6b11c0d06 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 17:21:32 -0600 Subject: [PATCH 129/181] Bump google.golang.org/protobuf from 1.34.1 to 1.34.2 (#622) Bumps google.golang.org/protobuf from 1.34.1 to 1.34.2. --- updated-dependencies: - dependency-name: google.golang.org/protobuf dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ebe4169c..0500f2c3 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( go.opentelemetry.io/otel/trace v1.27.0 golang.org/x/net v0.26.0 google.golang.org/grpc v1.64.0 - google.golang.org/protobuf v1.34.1 + google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 ) diff --git a/go.sum b/go.sum index 209ed510..d2c1a3f9 100644 --- a/go.sum +++ b/go.sum @@ -248,8 +248,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From f1e143b9a343b5b7f6f98dc5217f51be82cd0c11 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 17:21:47 -0600 Subject: [PATCH 130/181] Bump github/codeql-action from 3.25.7 to 3.25.10 (#621) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.7 to 3.25.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/f079b8493333aace61c81488f8bd40919487bd9f...23acc5c183826b7a8a97bce3cecc52db901f8251) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecard.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 12d4fc25..c18d5444 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -35,14 +35,14 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 + uses: github/codeql-action/init@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 + uses: github/codeql-action/autobuild@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 + uses: github/codeql-action/analyze@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 11130553..eed1eaef 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7 + uses: github/codeql-action/upload-sarif@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 with: sarif_file: results.sarif From 5701cd6876068e1c198982c930fcbfa6233caafc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 17:22:01 -0600 Subject: [PATCH 131/181] Bump actions/checkout from 4.1.6 to 4.1.7 (#620) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.6 to 4.1.7. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/a5ac7e51b41094c92402da3b24376905380afc29...692973e3d937129bcbf40652eb9f2f61becf3332) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 2 +- .github/workflows/main.yaml | 6 +++--- .github/workflows/pullrequest.yaml | 6 +++--- .github/workflows/release.yaml | 4 ++-- .github/workflows/scorecard.yml | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c18d5444..ca5e3c7d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Go uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index c1f9b7ba..f175796d 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -12,14 +12,14 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up QEMU uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index e6b7edd5..4d23f53f 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: check format run: make check_format @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: build and test run: make docker_tests @@ -29,7 +29,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 0ae756ff..7a58302e 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,13 +12,13 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up QEMU uses: 
docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index eed1eaef..f69e0191 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -29,7 +29,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: persist-credentials: false From cdf0405340c19d2e185cd22c82a86043a0ce5303 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 17:24:07 -0600 Subject: [PATCH 132/181] Bump golang from 1.22.3 to 1.22.4 in /examples/xds-sotw-config-server (#618) Bumps golang from 1.22.3 to 1.22.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index 2fff1497..c92b7fe0 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.3@sha256:b1e05e2c918f52c59d39ce7d5844f73b2f4511f7734add8bb98c9ecdd4443365 AS build +FROM golang:1.22.4@sha256:969349b8121a56d51c74f4c273ab974c15b3a8ae246a5cffc1df7d28b66cf978 AS build WORKDIR /xds-server COPY . . From 4f3c99dbcab8fb40ed8d47fe122a43e1ab24ad31 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 17:24:28 -0600 Subject: [PATCH 133/181] Bump golang from 1.22.3 to 1.22.4 (#617) Bumps golang from 1.22.3 to 1.22.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.integration | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 275fb12c..70379825 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.3@sha256:b1e05e2c918f52c59d39ce7d5844f73b2f4511f7734add8bb98c9ecdd4443365 AS build +FROM golang:1.22.4@sha256:969349b8121a56d51c74f4c273ab974c15b3a8ae246a5cffc1df7d28b66cf978 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index ed162cfc..20b76c1e 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. 
-FROM golang@sha256:b1e05e2c918f52c59d39ce7d5844f73b2f4511f7734add8bb98c9ecdd4443365 +FROM golang@sha256:969349b8121a56d51c74f4c273ab974c15b3a8ae246a5cffc1df7d28b66cf978 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* From 92487acf48e5cf599fe0ad1d2973d34e4c256f80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 17:25:40 -0600 Subject: [PATCH 134/181] Bump go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc (#615) Bumps [go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc](https://github.com/open-telemetry/opentelemetry-go-contrib) from 0.46.1 to 0.52.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.46.1...zpages/v0.52.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 5 ++--- go.sum | 15 ++++----------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 0500f2c3..9dfff441 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 go.opentelemetry.io/otel v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 @@ -34,7 +34,6 @@ require ( ) require ( - cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -58,6 +57,6 @@ require ( golang.org/x/text v0.16.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index d2c1a3f9..0f564f34 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,4 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= -cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= 
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= @@ -139,8 +134,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= @@ -186,8 +181,6 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -239,8 +232,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 h1:AgADTJarZTBqgjiUzRgfaBchgYB3/WFTC80GPwsMcRI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From e509f36dfdbdcbefb8603ae2f05cf69b32a62288 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 
17:26:05 -0600 Subject: [PATCH 135/181] Bump github.com/alicebob/miniredis/v2 from 2.31.0 to 2.33.0 (#614) Bumps [github.com/alicebob/miniredis/v2](https://github.com/alicebob/miniredis) from 2.31.0 to 2.33.0. - [Release notes](https://github.com/alicebob/miniredis/releases) - [Changelog](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md) - [Commits](https://github.com/alicebob/miniredis/compare/v2.31.0...v2.33.0) --- updated-dependencies: - dependency-name: github.com/alicebob/miniredis/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 12 ++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 9dfff441..27d0819e 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21.5 require ( github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/alicebob/miniredis/v2 v2.31.0 + github.com/alicebob/miniredis/v2 v2.33.0 github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 github.com/coocood/freecache v1.2.4 github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b diff --git a/go.sum b/go.sum index 0f564f34..1413faf2 100644 --- a/go.sum +++ b/go.sum @@ -2,14 +2,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0= github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis/v2 v2.31.0 h1:ObEFUNlJwoIiyjxdrYF0QIDE7qXcLc7D3WpSH4c22PU= -github.com/alicebob/miniredis/v2 v2.31.0/go.mod h1:UB/T2Uztp7MlFSDakaX1sTXUv5CASoprx0wulRT6HBg= +github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA= +github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c= @@ -21,9 +19,6 @@ github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= @@ -54,7 +49,6 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= @@ -131,7 +125,6 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= @@ -188,7 +181,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From 7ca7ecadc6efc2e3062fe7b08c538999d091c8bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 18 Jun 2024 17:26:24 -0600 Subject: [PATCH 136/181] Bump go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp (#613) Bumps [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) from 1.21.0 to 1.27.0. 
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...v1.27.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 27d0819e..625c15c5 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( go.opentelemetry.io/otel v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 go.opentelemetry.io/otel/sdk v1.27.0 go.opentelemetry.io/otel/trace v1.27.0 golang.org/x/net v0.26.0 diff --git a/go.sum b/go.sum index 1413faf2..e030eaf1 100644 --- a/go.sum +++ b/go.sum @@ -135,8 +135,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULw go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= From 0ddd444cf5b15e333a4e2553e059ce8d69981a37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jhonn=20W=2E=20Fraz=C3=A3o?= Date: Tue, 18 Jun 2024 20:27:07 -0300 Subject: [PATCH 137/181] feat: add log error in should ratelimit (#612) Signed-off-by: frazao.jhonn --- src/service/ratelimit.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index e918d9bb..0268f1bc 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -285,7 +285,8 @@ func (this *service) ShouldRateLimit( return } - logger.Debugf("caught error during call") + logger.Debugf("caught error during call: %v", err) + finalResponse = nil switch t := err.(type) { case redis.RedisError: From db7fc7868b0921966761b8109b7fa231ae95ad5c Mon Sep 17 00:00:00 2001 From: Josh Date: Wed, 19 Jun 2024 21:14:55 -0500 Subject: [PATCH 138/181] feat: add stat for domain not found (#624) Signed-off-by: Josh Jaques --- src/config/config_impl.go | 2 ++ src/stats/manager.go | 9 +++++++++ src/stats/manager_impl.go | 7 +++++++ 
 test/config/config_test.go | 3 +++
 test/mocks/stats/manager.go | 9 +++++++++
 5 files changed, 30 insertions(+)

diff --git a/src/config/config_impl.go b/src/config/config_impl.go
index df0e473d..7ada07e1 100644
--- a/src/config/config_impl.go
+++ b/src/config/config_impl.go
@@ -285,6 +285,8 @@ func (this *rateLimitConfigImpl) GetLimit(
 	value := this.domains[domain]
 	if value == nil {
 		logger.Debugf("unknown domain '%s'", domain)
+		domainStats := this.statsManager.NewDomainStats(domain)
+		domainStats.NotFound.Inc()
 		return rateLimit
 	}
 
diff --git a/src/stats/manager.go b/src/stats/manager.go
index ba83fd51..43d72d59 100644
--- a/src/stats/manager.go
+++ b/src/stats/manager.go
@@ -9,6 +9,9 @@ type Manager interface {
 	// NewStats provides a RateLimitStats structure associated with a given descriptorKey.
 	// Multiple calls with the same descriptorKey argument are guaranteed to be equivalent.
 	NewStats(descriptorKey string) RateLimitStats
	// Gets stats for a domain (incremented when a requested domain is not found).
+	// Multiple calls with the same domain argument are guaranteed to be equivalent.
+	NewDomainStats(domain string) DomainStats
 	// Initializes a ShouldRateLimitStats structure.
 	// Multiple calls to this method are idempotent.
 	NewShouldRateLimitStats() ShouldRateLimitStats
@@ -52,3 +55,9 @@ type RateLimitStats struct {
 	WithinLimit gostats.Counter
 	ShadowMode  gostats.Counter
 }
+
+// Stats for a domain entry
+type DomainStats struct {
+	Key      string
+	NotFound gostats.Counter
+}
diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go
index efe8aa07..ee0cf013 100644
--- a/src/stats/manager_impl.go
+++ b/src/stats/manager_impl.go
@@ -39,6 +39,13 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats {
 	return ret
 }
 
+func (this *ManagerImpl) NewDomainStats(domain string) DomainStats {
+	ret := DomainStats{}
+	domain = utils.SanitizeStatName(domain)
+	ret.NotFound = this.rlStatsScope.NewCounter(domain + ".domain_not_found")
+	return ret
+}
+
 func (this *ManagerImpl) NewShouldRateLimitStats() ShouldRateLimitStats {
 	ret := ShouldRateLimitStats{}
 	ret.RedisError = this.shouldRateLimitScope.NewCounter("redis_error")
diff --git a/test/config/config_test.go b/test/config/config_test.go
index eb52974f..3c06b088 100644
--- a/test/config/config_test.go
+++ b/test/config/config_test.go
@@ -33,8 +33,11 @@ func TestBasicConfig(t *testing.T) {
 	rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats), false)
 	rlConfig.Dump()
 	assert.Equal(rlConfig.IsEmptyDomains(), false)
+	assert.EqualValues(0, stats.NewCounter("foo_domain.domain_not_found").Value())
 	assert.Nil(rlConfig.GetLimit(context.TODO(), "foo_domain", &pb_struct.RateLimitDescriptor{}))
+	assert.EqualValues(1, stats.NewCounter("foo_domain.domain_not_found").Value())
 	assert.Nil(rlConfig.GetLimit(context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{}))
+	assert.EqualValues(0, stats.NewCounter("test-domain.domain_not_found").Value())
 	rl := rlConfig.GetLimit(
 		context.TODO(), "test-domain",
diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go
index dd9db246..522f8486 100644
--- a/test/mocks/stats/manager.go
+++ b/test/mocks/stats/manager.go
@@ -48,6 +48,15 @@ func (m *MockStatManager) NewStats(key string) stats.RateLimitStats {
 	return ret
 }
 
+func (m *MockStatManager) NewDomainStats(key string) stats.DomainStats {
+	ret := stats.DomainStats{}
+	logger.Debugf("outputting test domain stats %s", key)
+	ret.Key = key
+	ret.NotFound = m.store.NewCounter(key + ".domain_not_found")
+
+	return ret
+}
+
 func NewMockStatManager(store gostats.Store) stats.Manager {
 	return &MockStatManager{store: store}
 }

From 00d7d6cb12deef6652940ffc482df59cb28364dd Mon Sep 17 00:00:00 2001
From: Josh
Date: Thu, 20 Jun 2024 10:53:31 -0500
Subject: [PATCH 139/181] feat(dogstatsd_sink): support EXTRA_TAGS (#625)

Previously, when using the godogstats sink, EXTRA_TAGS were not emitted
as datadog tags.

Signed-off-by: Josh Jaques

---
 README.md                             |  2 +-
 src/godogstats/dogstatsd_sink.go      | 50 +++++++++++++-
 src/godogstats/dogstatsd_sink_test.go | 98 +++++++++++++++++++++++++++
 3 files changed, 146 insertions(+), 4 deletions(-)
 create mode 100644 src/godogstats/dogstatsd_sink_test.go

diff --git a/README.md b/README.md
index 4189bb06..25a5daee 100644
--- a/README.md
+++ b/README.md
@@ -896,7 +896,7 @@ First, enable an extra mogrifier:

 Then, declare additional rules for the `DESCRIPTOR` mogrifier

-1. `DOG_STATSD_MOGRIFIER_HITS_PATTERN`: `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`
+1. `DOG_STATSD_MOGRIFIER_HITS_PATTERN`: `^ratelimit\.service\.rate_limit\.([^.]+)\.(.*)\.([^.]+)$`
 2. `DOG_STATSD_MOGRIFIER_HITS_NAME`: `ratelimit.service.rate_limit.$3`
 3. `DOG_STATSD_MOGRIFIER_HITS_TAGS`: `domain:$1,descriptor:$2`

diff --git a/src/godogstats/dogstatsd_sink.go b/src/godogstats/dogstatsd_sink.go
index 8632627f..98e94091 100644
--- a/src/godogstats/dogstatsd_sink.go
+++ b/src/godogstats/dogstatsd_sink.go
@@ -3,10 +3,12 @@ package godogstats
 import (
 	"regexp"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/DataDog/datadog-go/v5/statsd"
 	gostats "github.com/lyft/gostats"
+	logger "github.com/sirupsen/logrus"
 )
 
 type godogStatsSink struct {
@@ -65,18 +67,60 @@ func NewSink(opts ...goDogStatsSinkOption) (*godogStatsSink, error) {
 	return sink, nil
 }
 
-func (g *godogStatsSink) FlushCounter(name string, value uint64) {
+// separateTags separates the metric name and tags from the combined serialized metric name.
+// e.g. given input: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345.__DEPLOY=67890"
+// this should produce output: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", ["COMMIT:12345", "DEPLOY:67890"]
+// Aligns to how tags are serialized here https://github.com/lyft/gostats/blob/49e70f1b7932d146fecd991be04f8e1ad235452c/internal/tags/tags.go#L335
+func separateTags(name string) (string, []string) {
+	const (
+		prefix = ".__"
+		sep    = "="
+	)
+
+	// split the name and tags about the first prefix for extra tags
+	shortName, tagString, hasTags := strings.Cut(name, prefix)
+	if !hasTags {
+		return name, nil
+	}
+
+	// split the tags at every instance of prefix
+	tagPairs := strings.Split(tagString, prefix)
+	tags := make([]string, 0, len(tagPairs))
+	for _, tagPair := range tagPairs {
+		// split the name + value by the separator
+		tagName, tagValue, isValid := strings.Cut(tagPair, sep)
+		if !isValid {
+			logger.Debugf("godogstats sink found malformed extra tag: %v, string: %v", tagPair, name)
+			continue
+		}
+		tags = append(tags, tagName+":"+tagValue)
+	}
+
+	return shortName, tags
+}
+
+// mogrify takes a serialized metric name as input (internal gostats format)
+// and returns a metric name and list of tags (dogstatsd output format)
+// the output list of tags includes any "tags" that are serialized into the metric name,
+// as well as any other tags emitted by the mogrifier config
+func (g *godogStatsSink) mogrify(name string) (string, []string) {
+	name, extraTags := separateTags(name)
 	name, tags := g.mogrifier.mogrify(name)
+	return name, append(extraTags, tags...)
+}
+
+func (g *godogStatsSink) FlushCounter(name string, value uint64) {
+	name, tags := g.mogrify(name)
 	g.client.Count(name, int64(value), tags, 1.0)
 }
 
 func (g *godogStatsSink) FlushGauge(name string, value uint64) {
-	name, tags := g.mogrifier.mogrify(name)
+	name, tags := g.mogrify(name)
 	g.client.Gauge(name, float64(value), tags, 1.0)
 }
 
 func (g *godogStatsSink) FlushTimer(name string, milliseconds float64) {
-	name, tags := g.mogrifier.mogrify(name)
+	name, tags := g.mogrify(name)
 	duration := time.Duration(milliseconds) * time.Millisecond
 	g.client.Timing(name, duration, tags, 1.0)
 }
diff --git a/src/godogstats/dogstatsd_sink_test.go b/src/godogstats/dogstatsd_sink_test.go
new file mode 100644
index 00000000..9b78552d
--- /dev/null
+++ b/src/godogstats/dogstatsd_sink_test.go
@@ -0,0 +1,98 @@
+package godogstats
+
+import (
+	"regexp"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSeparateExtraTags(t *testing.T) {
+	tests := []struct {
+		name         string
+		givenMetric  string
+		expectOutput string
+		expectTags   []string
+	}{
+		{
+			name:         "no extra tags",
+			givenMetric:  "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits",
+			expectOutput: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits",
+			expectTags:   nil,
+		},
+		{
+			name:         "one extra tag",
+			givenMetric:  "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345",
+			expectOutput: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits",
+			expectTags:   []string{"COMMIT:12345"},
+		},
+		{
+			name:         "two extra tags",
+			givenMetric:  "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345.__DEPLOY=6890",
+			expectOutput: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits",
+			expectTags:   []string{"COMMIT:12345", "DEPLOY:6890"},
+		},
+		{
+			name:         "invalid extra tag no value",
+			givenMetric:  "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT",
+			expectOutput: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits",
+			expectTags:   []string{},
+		},
+	}
+
+	for _, tt := range tests {
+		actualName, actualTags := separateTags(tt.givenMetric)
+
+		assert.Equal(t, tt.expectOutput, actualName)
+		assert.Equal(t, tt.expectTags, actualTags)
+	}
+}
+
+func TestSinkMogrify(t *testing.T) {
+	g := &godogStatsSink{
+		mogrifier: mogrifierMap{
+			regexp.MustCompile(`^ratelimit\.(.*)$`): func(matches []string) (string, []string) {
+				return "custom." + matches[1], []string{"tag1:value1", "tag2:value2"}
+			},
+		},
+	}
+
+	tests := []struct {
+		name         string
+		input        string
+		expectedName string
+		expectedTags []string
+	}{
+		{
+			name:         "mogrify with match and extra tags",
+			input:        "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345.__DEPLOY=67890",
+			expectedName: "custom.service.rate_limit.mongo_cps.database_users.total_hits",
+			expectedTags: []string{"COMMIT:12345", "DEPLOY:67890", "tag1:value1", "tag2:value2"},
+		},
+		{
+			name:         "mogrify with match without extra tags",
+			input:        "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits",
+			expectedName: "custom.service.rate_limit.mongo_cps.database_users.total_hits",
+			expectedTags: []string{"tag1:value1", "tag2:value2"},
+		},
+		{
+			name:         "extra tags with no match",
+			input:        "foo.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345.__DEPLOY=67890",
+			expectedName: "foo.service.rate_limit.mongo_cps.database_users.total_hits",
+			expectedTags: []string{"COMMIT:12345", "DEPLOY:67890"},
+		},
+		{
+			name:         "no mogrification",
+			input:        "other.metric.name",
+			expectedName: "other.metric.name",
+			expectedTags: nil,
+		},
+	}
+
+	for _, tt := range tests {
+		actualName, actualTags := g.mogrify(tt.input)
+
+		assert.Equal(t, tt.expectedName, actualName)
+		assert.Equal(t, tt.expectedTags, actualTags)
+	}
+}

From 91484c5983f85f62e97d18475bc3d5a12b43b9ca Mon Sep 17 00:00:00 2001
From: Josh
Date: Thu, 20 Jun 2024 13:42:02 -0500
Subject: [PATCH 140/181] fix(dogstatsd_sink): switch from pure map to list of
 key/value pairs (#627)

The "Two mogrifiers: first match" test was flaky because iteration order
over the underlying map is not guaranteed: sometimes the first mogrifier
matched and sometimes the second. A consistent mogrifier order is
required for some types of priority-based manipulation, and order is how
the keys are defined in the environment, so changing the overall
implementation to a slice that maintains ordered iteration is the best
option, and it can be done by changing only the internal private types.

Signed-off-by: Josh Jaques

---
 src/godogstats/dogstatsd_sink.go      |  8 +++++-
 src/godogstats/dogstatsd_sink_test.go |  7 +++--
 src/godogstats/mogrifier_map.go       | 39 ++++++++++++++++-----------
 src/godogstats/mogrifier_map_test.go  | 11 +++++---
 4 files changed, 43 insertions(+), 22 deletions(-)

diff --git a/src/godogstats/dogstatsd_sink.go b/src/godogstats/dogstatsd_sink.go
index 98e94091..022e3fc1 100644
--- a/src/godogstats/dogstatsd_sink.go
+++ b/src/godogstats/dogstatsd_sink.go
@@ -38,9 +38,15 @@ func WithStatsdPort(port int) goDogStatsSinkOption {
 	}
 }
 
+// WithMogrifier adds a mogrifier to the sink. Map iteration order is randomized; to control the order, call this option multiple times.
 func WithMogrifier(mogrifiers map[*regexp.Regexp]func([]string) (string, []string)) goDogStatsSinkOption {
 	return func(g *godogStatsSink) {
-		g.mogrifier = mogrifiers
+		for m, h := range mogrifiers {
+			g.mogrifier = append(g.mogrifier, mogrifierEntry{
+				matcher: m,
+				handler: h,
+			})
+		}
 	}
 }
 
diff --git a/src/godogstats/dogstatsd_sink_test.go b/src/godogstats/dogstatsd_sink_test.go
index 9b78552d..0e021e92 100644
--- a/src/godogstats/dogstatsd_sink_test.go
+++ b/src/godogstats/dogstatsd_sink_test.go
@@ -51,8 +51,11 @@ func TestSinkMogrify(t *testing.T) {
 	g := &godogStatsSink{
 		mogrifier: mogrifierMap{
-			regexp.MustCompile(`^ratelimit\.(.*)$`): func(matches []string) (string, []string) {
-				return "custom."
+ matches[1], []string{"tag1:value1", "tag2:value2"} + { + matcher: regexp.MustCompile(`^ratelimit\.(.*)$`), + handler: func(matches []string) (string, []string) { + return "custom." + matches[1], []string{"tag1:value1", "tag2:value2"} + }, }, }, } diff --git a/src/godogstats/mogrifier_map.go b/src/godogstats/mogrifier_map.go index 78d8ea0e..8983a7f6 100644 --- a/src/godogstats/mogrifier_map.go +++ b/src/godogstats/mogrifier_map.go @@ -12,8 +12,13 @@ var varFinder = regexp.MustCompile(`\$\d+`) // matches $0, $1, etc. const envPrefix = "DOG_STATSD_MOGRIFIER" // prefix for environment variables -// mogrifierMap is a map of regular expressions to functions that mogrify a name and return tags -type mogrifierMap map[*regexp.Regexp]func([]string) (string, []string) +type mogrifierEntry struct { + matcher *regexp.Regexp // the matcher determines whether a mogrifier should run on a metric at all + handler func(matches []string) (name string, tags []string) // the handler takes the list of matches, and returns metric name and list of tags +} + +// mogrifierMap is an ordered map of regular expressions to functions that mogrify a name and return tags +type mogrifierMap []mogrifierEntry // makePatternHandler returns a function that replaces $0, $1, etc. in the pattern with the corresponding match func makePatternHandler(pattern string) func([]string) string { @@ -73,32 +78,36 @@ func newMogrifierMapFromEnv(keys []string) (mogrifierMap, error) { } } - mogrifiers[re] = func(matches []string) (string, []string) { - name := nameHandler(matches) - tags := make([]string, 0, len(tagHandlers)) - for tagKey, handler := range tagHandlers { - tagValue := handler(matches) - tags = append(tags, tagKey+":"+tagValue) - } - return name, tags - } + mogrifiers = append(mogrifiers, mogrifierEntry{ + matcher: re, + handler: func(matches []string) (string, []string) { + name := nameHandler(matches) + tags := make([]string, 0, len(tagHandlers)) + for tagKey, handler := range tagHandlers { + tagValue := handler(matches) + tags = append(tags, tagKey+":"+tagValue) + } + return name, tags + }, + }, + ) } return mogrifiers, nil } // mogrify applies the first mogrifier in the map that matches the name -func (m mogrifierMap) mogrify(name string) (string, []string) { +func (m *mogrifierMap) mogrify(name string) (string, []string) { if m == nil { return name, nil } - for matcher, mogrifier := range m { - matches := matcher.FindStringSubmatch(name) + for _, mogrifier := range *m { + matches := mogrifier.matcher.FindStringSubmatch(name) if len(matches) == 0 { continue } - mogrifiedName, tags := mogrifier(matches) + mogrifiedName, tags := mogrifier.handler(matches) return mogrifiedName, tags } diff --git a/src/godogstats/mogrifier_map_test.go b/src/godogstats/mogrifier_map_test.go index ec8eace3..f44d2bce 100644 --- a/src/godogstats/mogrifier_map_test.go +++ b/src/godogstats/mogrifier_map_test.go @@ -9,10 +9,13 @@ import ( func testMogrifier() mogrifierMap { return mogrifierMap{ - regexp.MustCompile(`^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`): func(matches []string) (string, []string) { - name := "ratelimit.service.rate_limit." + matches[3] - tags := []string{"domain:" + matches[1], "descriptor:" + matches[2]} - return name, tags + { + matcher: regexp.MustCompile(`^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`), + handler: func(matches []string) (string, []string) { + name := "ratelimit.service.rate_limit." 
+ matches[3] + tags := []string{"domain:" + matches[1], "descriptor:" + matches[2]} + return name, tags + }, }, } } From 3d87d0e87cf3f371d176952b75b530d53a1ba497 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:29:08 -0600 Subject: [PATCH 141/181] Bump alpine from 3.20.0 to 3.20.2 (#661) Bumps alpine from 3.20.0 to 3.20.2. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 70379825..1e16f7be 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.20.0@sha256:77726ef6b57ddf65bb551896826ec38bc3e53f75cdde31354fbffb4f25238ebd AS final +FROM alpine:3.20.2@sha256:0a4eaa0eecf5f8c050e5bba433f58c052be7587ee8af3e8b3910ef9ab5fbe9f5 AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit From c1de7ec5aba0e473ecbf58a8414eb91a461fd15a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:29:27 -0600 Subject: [PATCH 142/181] Bump docker/setup-qemu-action from 1.2.0 to 3.2.0 (#660) Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 1.2.0 to 3.2.0. - [Release notes](https://github.com/docker/setup-qemu-action/releases) - [Commits](https://github.com/docker/setup-qemu-action/compare/27d0a4f181a40b142cce983c5393082c365d1480...49b3bc8e6bdd4a60e6116a5414239cba5943d3cf) --- updated-dependencies: - dependency-name: docker/setup-qemu-action dependency-type: direct:production update-type: version-update:semver-major ... 
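Stepping back to the domain_not_found counter added in PATCH 138 above, the sketch below shows, using only the public gostats API, the counter naming that the config test asserts. It is a hedged illustration assumed for this example, not part of any patch in this series.

package main

import (
	"fmt"

	stats "github.com/lyft/gostats"
)

func main() {
	// A store backed by a null sink keeps the sketch self-contained.
	store := stats.NewStore(stats.NewNullSink(), false)

	// GetLimit (see the config_impl.go hunk above) increments
	// "<domain>.domain_not_found" whenever a request names a domain
	// that is not present in the loaded configuration.
	notFound := store.NewCounter("foo_domain.domain_not_found")
	notFound.Inc()

	fmt.Println(notFound.Value()) // prints 1, mirroring the test assertion
}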
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/main.yaml | 2 +- .github/workflows/release.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index f175796d..f2e27527 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up QEMU - uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker buildx id: buildx diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 7a58302e..0dcdf5c0 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -21,7 +21,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up QEMU - uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker buildx id: buildx From c5bbc24b99e2654937e0ed783448ab726547ad32 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:29:45 -0600 Subject: [PATCH 143/181] Bump github/codeql-action from 3.25.10 to 3.25.15 (#659) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.10 to 3.25.15. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/23acc5c183826b7a8a97bce3cecc52db901f8251...afb54ba388a7dca6ecae48f608c4ff05ff4cc77a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecard.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index ca5e3c7d..a41c8bb9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -35,14 +35,14 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 + uses: github/codeql-action/init@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 + uses: github/codeql-action/autobuild@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 + uses: github/codeql-action/analyze@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index f69e0191..66b63038 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@23acc5c183826b7a8a97bce3cecc52db901f8251 # v3.25.10 + uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 with: sarif_file: results.sarif From 50d4d197a8510b703e0b648511cd3bd61a34d6a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:30:05 -0600 Subject: [PATCH 144/181] Bump docker/setup-buildx-action from 3.2.0 to 3.5.0 (#658) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.2.0 to 3.5.0. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/2b51285047da1547ffb1b2203d8be4c0af6b1f20...aa33708b10e362ff993539393ff100fa93ed6a27) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/main.yaml | 2 +- .github/workflows/release.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index f2e27527..0a4310ee 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -26,7 +26,7 @@ jobs: - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v3.2.0 + uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 - name: build and push docker image run: | diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 0dcdf5c0..9381ae27 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -25,7 +25,7 @@ jobs: - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v3.2.0 + uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 - name: build and push docker image run: | From 321e333aaa48526f3725b1261f78f0e288c09337 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:30:26 -0600 Subject: [PATCH 145/181] Bump alpine from `77726ef` to `0a4eaa0` in /integration-test (#657) Bumps alpine from `77726ef` to `0a4eaa0`. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- integration-test/Dockerfile.tester | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-test/Dockerfile.tester b/integration-test/Dockerfile.tester index 04724e84..fdf72745 100644 --- a/integration-test/Dockerfile.tester +++ b/integration-test/Dockerfile.tester @@ -1,4 +1,4 @@ -FROM alpine@sha256:77726ef6b57ddf65bb551896826ec38bc3e53f75cdde31354fbffb4f25238ebd +FROM alpine@sha256:0a4eaa0eecf5f8c050e5bba433f58c052be7587ee8af3e8b3910ef9ab5fbe9f5 USER root From 30a4ce1af92e248e44b9c71fe7dc401ff0d33d5a Mon Sep 17 00:00:00 2001 From: John Zheng Date: Thu, 1 Aug 2024 11:31:07 +0800 Subject: [PATCH 146/181] improve go version, to fix security issue. 
(#656) Signed-off-by: John --- Dockerfile | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1e16f7be..1fac024a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.4@sha256:969349b8121a56d51c74f4c273ab974c15b3a8ae246a5cffc1df7d28b66cf978 AS build +FROM golang:1.22.5@sha256:86a3c48a61915a8c62c0e1d7594730399caa3feb73655dfe96c7bc17710e96cf AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/go.mod b/go.mod index 625c15c5..eb80fa2b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/envoyproxy/ratelimit -go 1.21.5 +go 1.21.11 require ( github.com/DataDog/datadog-go/v5 v5.5.0 From 6a63b38f0fb8bb0a4b08646a3c998abe3bc310cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:31:23 -0600 Subject: [PATCH 147/181] Bump actions/setup-python from 5.1.0 to 5.1.1 (#649) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.1.0 to 5.1.1. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/82c7e631bb3cdc910f68e0081d67478d79c6982d...39cd14951b08e74b54015e9e001cdefcf80e669f) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 0a4310ee..3ad1bfba 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -43,7 +43,7 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: "3.9" diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 4d23f53f..6cb4f4ca 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -31,7 +31,7 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: "3.9" From b450b61b4910d4e5ce279b343356f17e450f8c76 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:32:02 -0600 Subject: [PATCH 148/181] Bump google.golang.org/grpc in /examples/xds-sotw-config-server (#646) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.64.0 to 1.65.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.64.0...v1.65.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
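Returning to the EXTRA_TAGS support from PATCH 139 above, the following condensed sketch reproduces the tag-separation idea with strings.Cut and strings.Split. It mirrors the separateTags helper in src/godogstats/dogstatsd_sink.go, but the file and its main function are a standalone illustration assumed for this example.

package main

import (
	"fmt"
	"strings"
)

// separateTags splits gostats' serialized form "name.__KEY=value" into a
// plain metric name plus dogstatsd-style "KEY:value" tags.
func separateTags(name string) (string, []string) {
	shortName, tagString, hasTags := strings.Cut(name, ".__")
	if !hasTags {
		return name, nil
	}
	var tags []string
	for _, pair := range strings.Split(tagString, ".__") {
		k, v, ok := strings.Cut(pair, "=")
		if !ok {
			continue // malformed extra tag: skipped, as in the sink
		}
		tags = append(tags, k+":"+v)
	}
	return shortName, tags
}

func main() {
	name, tags := separateTags("ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345.__DEPLOY=67890")
	fmt.Println(name) // ratelimit.service.rate_limit.mongo_cps.database_users.total_hits
	fmt.Println(tags) // [COMMIT:12345 DEPLOY:67890]
}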
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/go.mod | 17 +++++++------ examples/xds-sotw-config-server/go.sum | 34 ++++++++++++++------------ 2 files changed, 27 insertions(+), 24 deletions(-) diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index 462c1b6f..67e53db8 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -4,19 +4,20 @@ go 1.21.5 require ( github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b - google.golang.org/grpc v1.64.0 + google.golang.org/grpc v1.65.0 ) require ( + cel.dev/expr v0.15.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect + github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect - google.golang.org/protobuf v1.33.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/protobuf v1.34.1 // indirect ) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum index d3d08558..6c12801f 100644 --- a/examples/xds-sotw-config-server/go.sum +++ b/examples/xds-sotw-config-server/go.sum @@ -1,7 +1,9 @@ +cel.dev/expr v0.15.0 h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w= +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b h1:M0BhcNaW04UV1haQO8IFSDB64dAeiBSsTMZks/sYDcQ= @@ -18,19 +20,19 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
-golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 30f27b010cd683e0702aac3edf06b537a95556c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:32:19 -0600 Subject: [PATCH 149/181] Bump actions/upload-artifact from 4.3.3 to 4.3.4 (#644) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.3 to 4.3.4. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/65462800fd760344b1a7b4382951275a0abb4808...0b2256b8c012f0828dc542b3febcab082c67f72b) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 66b63038..077182da 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -56,7 +56,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: SARIF file path: results.sarif From 80b83bfb5f08bbbe7ca8c04b01c48ae1946b0052 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:32:42 -0600 Subject: [PATCH 150/181] Bump go.opentelemetry.io/otel from 1.27.0 to 1.28.0 (#641) Bumps [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) from 1.27.0 to 1.28.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.27.0...v1.28.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index eb80fa2b..bb08a15b 100644 --- a/go.mod +++ b/go.mod @@ -21,12 +21,12 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 - go.opentelemetry.io/otel v1.27.0 + go.opentelemetry.io/otel v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 go.opentelemetry.io/otel/sdk v1.27.0 - go.opentelemetry.io/otel/trace v1.27.0 + go.opentelemetry.io/otel/trace v1.28.0 golang.org/x/net v0.26.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.2 @@ -43,7 +43,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect @@ -51,7 +51,7 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.2.0 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect diff --git a/go.sum b/go.sum index e030eaf1..408a802e 100644 --- a/go.sum +++ b/go.sum @@ -42,8 +42,8 @@ github.com/fsnotify/fsnotify v1.7.0/go.mod 
h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyT github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -129,20 +129,20 @@ github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/atomic v1.7.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= From 05bf226d3d1b812110fe5722b619960a7455279a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:33:04 -0600 Subject: [PATCH 151/181] Bump go.opentelemetry.io/otel/trace from 1.27.0 to 1.28.0 (#640) Bumps [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) from 1.27.0 to 1.28.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.27.0...v1.28.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/trace dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
From f4af2dbe812a656901af1f5969f58f2408ccf796 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Br=C3=BCderl?= Date: Thu, 1 Aug 2024 05:34:51 +0200 Subject: [PATCH 152/181] runner: close rate limiter on Stop (#635) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The redis client is never closed, even after graceful shutdown. This can be observed by creating a runner, calling Stop(), and then stopping redis: the runner is expected to be stopped and all of its components closed, yet the logs still show that redis is unavailable. This patch lets the runner gracefully close the rate limit implementations, so all components are shut down cleanly on Stop. Signed-off-by: Johannes Brüderl --- src/redis/cache_impl.go | 8 ++++++-- src/service_cmd/runner/runner.go | 22 +++++++++++++++------- src/utils/multi_closer.go | 18 ++++++++++++++++++ 3 files changed, 39 insertions(+), 9 deletions(-) create mode 100644 src/utils/multi_closer.go
diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 0b0a45b4..30890786 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,6 +1,7 @@ package redis import ( + "io" "math/rand" "github.com/coocood/freecache" @@ -12,15 +13,18 @@ import ( "github.com/envoyproxy/ratelimit/src/utils" ) -func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, statsManager stats.Manager) limiter.RateLimitCache { +func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, statsManager stats.Manager) (limiter.RateLimitCache, io.Closer) { + closer := &utils.MultiCloser{} var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, s.RedisPerSecondSocketType, s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv) + closer.Closers = append(closer.Closers, perSecondPool) } otherPool := NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisSocketType, s.RedisType, s.RedisUrl, s.RedisPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv) + closer.Closers = append(closer.Closers, otherPool) return NewFixedRateLimitCacheImpl( otherPool, @@ -33,5 +37,5 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca s.CacheKeyPrefix, statsManager, s.StopCacheKeyIncrementWhenOverlimit, - ) + ), closer }
diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index f645e58f..c079182f 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -32,10 +32,11 @@ import ( ) type Runner struct { - statsManager stats.Manager - settings settings.Settings - srv server.Server - mu sync.Mutex + statsManager stats.Manager + settings settings.Settings + srv server.Server + mu sync.Mutex + ratelimitCloser io.Closer } func NewRunner(s settings.Settings) Runner { @@ -80,7 +81,7 @@ func (runner *Runner) GetStatsStore() gostats.Store { return runner.statsManager.GetStatsStore() } -func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache, statsManager stats.Manager) limiter.RateLimitCache { +func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache, statsManager stats.Manager) (limiter.RateLimitCache, io.Closer) { switch s.BackendType { case "redis", "": return redis.NewRateLimiterCacheImplFromSettings( @@ -99,7 +100,7 @@ func createLimiter(srv server.Server, s settings.Settings, localCache *freecache rand.New(utils.NewLockedSource(time.Now().Unix())), localCache, srv.Scope(), - statsManager) + statsManager), &utils.MultiCloser{} // memcache client can't be closed default: logger.Fatalf("Invalid setting for BackendType: %s", s.BackendType) panic("This line should not be reachable") @@ -147,8 +148,11 @@ func (runner *Runner) Run() { runner.srv = srv runner.mu.Unlock() + limiter, limiterCloser := createLimiter(srv, s, localCache, runner.statsManager) + runner.ratelimitCloser = limiterCloser + service := ratelimit.NewService( - createLimiter(srv, s, localCache, runner.statsManager), + limiter, srv.Provider(), runner.statsManager, srv.HealthChecker(), @@ -184,4 +188,8 @@ func (runner *Runner) Stop() { if srv != nil { srv.Stop() } + + if runner.ratelimitCloser != nil { + _ = runner.ratelimitCloser.Close() + } }
diff --git a/src/utils/multi_closer.go b/src/utils/multi_closer.go new file mode 100644 index 00000000..fead3f6b --- /dev/null +++ b/src/utils/multi_closer.go @@ -0,0 +1,18 @@ +package utils + +import ( + "errors" + "io" +) + +type MultiCloser struct { + Closers []io.Closer +} + +func (m *MultiCloser) Close() error { + var e error + for _, closer := range m.Closers { + e = errors.Join(e, closer.Close()) + } + return e +}
From b3b7c4b415a5b4c1cbaf2d8bc7062ffd6da3d054 Mon Sep 17 00:00:00 2001 From: jayme-github Date: Thu, 1 Aug 2024 05:35:59 +0200 Subject: [PATCH 153/181] Add CertProvider to hot reload TLS certs for gRPC service (#587) Signed-off-by: Janis Meybohm --- Makefile | 7 +- README.md | 17 ++++- src/provider/cert_provider.go | 109 +++++++++++++++++++++++++++ src/server/server_impl.go | 49 ++++++------ test/integration/integration_test.go | 87 +++++++++++++++++++++ 5 files changed, 241 insertions(+), 28 deletions(-) create mode 100644 src/provider/cert_provider.go
diff --git a/Makefile b/Makefile index 2f98413d..342e2beb 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,11 @@ BUILDX_PLATFORMS := linux/amd64,linux/arm64/v8 # Root dir returns absolute path of current directory. It has a trailing "/".
PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) export PROJECT_DIR +ifneq ($(shell docker compose version 2>/dev/null),) + DOCKER_COMPOSE=docker compose +else + DOCKER_COMPOSE=docker-compose +endif .PHONY: bootstrap bootstrap: ; @@ -142,7 +147,7 @@ docker_multiarch_push: docker_multiarch_image .PHONY: integration_tests integration_tests: - docker-compose --project-directory $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester + $(DOCKER_COMPOSE) --project-directory $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester .PHONY: precommit_install precommit_install:
diff --git a/README.md b/README.md index 25a5daee..e4bc1558 100644 --- a/README.md +++ b/README.md @@ -1109,17 +1109,26 @@ otelcol-contrib --config examples/otlp-collector/config.yaml docker run -d --name jaeger -p 16686:16686 -p 14250:14250 jaegertracing/all-in-one:1.33 ``` +# TLS + +Ratelimit supports TLS for its gRPC endpoint. + +The following environment variables control the TLS feature: + +1. `GRPC_SERVER_USE_TLS` - Enables gRPC connections to server over TLS +1. `GRPC_SERVER_TLS_CERT` - Path to the file containing the server cert chain +1. `GRPC_SERVER_TLS_KEY` - Path to the file containing the server private key + +Ratelimit uses [goruntime](https://github.com/lyft/goruntime) to watch the TLS certificate and key and will hot reload them on changes. + # mTLS Ratelimit supports mTLS when Envoy sends requests to the service. -The following environment variables control the mTLS feature: +TLS must be enabled on the gRPC endpoint in order for mTLS to work; see [TLS](#tls). The following variables can be set to enable mTLS on the Ratelimit service. -1. `GRPC_SERVER_USE_TLS` - Enables gprc connections to server over TLS -1. `GRPC_SERVER_TLS_CERT` - Path to the file containing the server cert chain -1. `GRPC_SERVER_TLS_KEY` - Path to the file containing the server private key 1. `GRPC_CLIENT_TLS_CACERT` - Path to the file containing the client CA certificate. 1. `GRPC_CLIENT_TLS_SAN` - (Optional) DNS Name to validate from the client cert during mTLS auth
diff --git a/src/provider/cert_provider.go b/src/provider/cert_provider.go new file mode 100644 index 00000000..321a14fa --- /dev/null +++ b/src/provider/cert_provider.go @@ -0,0 +1,109 @@ +package provider + +import ( + "crypto/tls" + "path/filepath" + "sync" + + "github.com/lyft/goruntime/loader" + gostats "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" + + "github.com/envoyproxy/ratelimit/src/settings" +) + +// CertProvider will watch certDirectory for changes via goruntime/loader and reload the cert and key files +type CertProvider struct { + settings settings.Settings + runtime loader.IFace + runtimeUpdateEvent chan int + rootStore gostats.Store + certLock sync.RWMutex + cert *tls.Certificate + certDirectory string + certFile string + keyFile string +} + +// GetCertificateFunc returns a function compatible with tls.Config.GetCertificate, fetching the current certificate +func (p *CertProvider) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + return func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + p.certLock.RLock() + defer p.certLock.RUnlock() + return p.cert, nil + } +} + +func (p *CertProvider) watch() { + p.runtime.AddUpdateCallback(p.runtimeUpdateEvent) + + go func() { + for { + logger.Debugf("CertProvider: waiting for runtime update") + <-p.runtimeUpdateEvent + logger.Debugf("CertProvider: got runtime update and reloading config") + p.reloadCert() + } + }() +} + +// reloadCert loads the cert and key files and updates the tls.Certificate in memory +func (p *CertProvider) reloadCert() { + tlsKeyPair, err := tls.LoadX509KeyPair(p.certFile, p.keyFile) + if err != nil { + logger.Errorf("CertProvider failed to load TLS key pair (%s, %s): %v", p.certFile, p.keyFile, err) + // panic in case there is no cert already loaded as this would mean starting up without TLS + if p.cert == nil { + logger.Fatalf("CertProvider failed to load any certificate, exiting.") + } + return // keep the old cert if we have one + } + p.certLock.Lock() + defer p.certLock.Unlock() + p.cert = &tlsKeyPair + logger.Infof("CertProvider reloaded cert from (%s, %s)", p.certFile, p.keyFile) +} + +// setupRuntime sets up the goruntime loader to watch the certDirectory +// Will panic if it fails to set up the loader +func (p *CertProvider) setupRuntime() { + var err error + + // runtimePath is the parent folder of certPath + runtimePath := filepath.Dir(p.certDirectory) + // runtimeSubdirectory is the name of the folder to watch, containing the certs + runtimeSubdirectory := filepath.Base(p.certDirectory) + + p.runtime, err = loader.New2( + runtimePath, + runtimeSubdirectory, + p.rootStore.ScopeWithTags("certs", p.settings.ExtraTags), + &loader.DirectoryRefresher{}, + loader.IgnoreDotFiles) + + if err != nil { + logger.Fatalf("Failed to set up goruntime loader: %v", err) + } +} + +// NewCertProvider creates a new CertProvider +// Will panic if it fails to set up goruntime or fails to load the initial certificate +func NewCertProvider(settings settings.Settings, rootStore gostats.Store, certFile, keyFile string) *CertProvider { + certDirectory := filepath.Dir(certFile) + if certDirectory != filepath.Dir(keyFile) { + logger.Fatalf("certFile and keyFile must be in the same directory") + } + p := &CertProvider{ + settings: settings, + runtimeUpdateEvent: make(chan int), + rootStore: rootStore, + certDirectory: certDirectory, + certFile: certFile, + keyFile: keyFile, + } +
p.setupRuntime() + // Initially load the certificate (or panic) + p.reloadCert() + go p.watch() + return p +} diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 0b42b40f..f2341917 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -26,7 +26,6 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" - "github.com/lyft/goruntime/loader" gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "google.golang.org/grpc" @@ -58,20 +57,20 @@ const ( ) type server struct { - httpAddress string - grpcAddress string - grpcListenType grpcListenType - debugAddress string - router *mux.Router - grpcServer *grpc.Server - store gostats.Store - scope gostats.Scope - provider provider.RateLimitConfigProvider - runtime loader.IFace - debugListener serverDebugListener - httpServer *http.Server - listenerMu sync.Mutex - health *HealthChecker + httpAddress string + grpcAddress string + grpcListenType grpcListenType + debugAddress string + router *mux.Router + grpcServer *grpc.Server + store gostats.Store + scope gostats.Scope + provider provider.RateLimitConfigProvider + debugListener serverDebugListener + httpServer *http.Server + listenerMu sync.Mutex + health *HealthChecker + grpcCertProvider *provider.CertProvider } func (server *server) AddDebugHttpEndpoint(path string, help string, handler http.HandlerFunc) { @@ -242,6 +241,14 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc ret := new(server) + // setup stats + ret.store = statsManager.GetStatsStore() + ret.scope = ret.store.ScopeWithTags(name, s.ExtraTags) + ret.store.AddStatGenerator(gostats.NewRuntimeStats(ret.scope.Scope("go"))) + if localCache != nil { + ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) + } + keepaliveOpt := grpc.KeepaliveParams(keepalive.ServerParameters{ MaxConnectionAge: s.GrpcMaxConnectionAge, MaxConnectionAgeGrace: s.GrpcMaxConnectionAgeGrace, @@ -256,6 +263,10 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc } if s.GrpcServerUseTLS { grpcServerTlsConfig := s.GrpcServerTlsConfig + ret.grpcCertProvider = provider.NewCertProvider(s, ret.store, s.GrpcServerTlsCert, s.GrpcServerTlsKey) + // Remove the static certificates and use the provider via the GetCertificate function + grpcServerTlsConfig.Certificates = nil + grpcServerTlsConfig.GetCertificate = ret.grpcCertProvider.GetCertificateFunc() // Verify client SAN if provided if s.GrpcClientTlsSAN != "" { grpcServerTlsConfig.VerifyPeerCertificate = verifyClient(grpcServerTlsConfig.ClientCAs, s.GrpcClientTlsSAN) @@ -275,14 +286,6 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc } ret.debugAddress = net.JoinHostPort(s.DebugHost, strconv.Itoa(s.DebugPort)) - // setup stats - ret.store = statsManager.GetStatsStore() - ret.scope = ret.store.ScopeWithTags(name, s.ExtraTags) - ret.store.AddStatGenerator(gostats.NewRuntimeStats(ret.scope.Scope("go"))) - if localCache != nil { - ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) - } - // setup config provider ret.provider = getProviderImpl(s, statsManager, ret.store) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index fda35cd9..5a8379a4 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -4,6 +4,7 @@ package 
integration_test import ( "crypto/tls" + "crypto/x509" "fmt" "io" "math/rand" @@ -294,6 +295,92 @@ func Test_mTLS(t *testing.T) { defer conn.Close() } +func TestReloadGRPCServerCerts(t *testing.T) { + common.WithMultiRedis(t, []common.RedisConfig{ + {Port: 6383}, + }, func() { + s := makeSimpleRedisSettings(6383, 6380, false, 0) + assert := assert.New(t) + // TLS setup initially used to configure the server + initialServerCAFile, initialServerCertFile, initialServerCertKey, err := mTLSSetup(utils.ServerCA) + assert.NoError(err) + // Second TLS setup that will replace the above during test + newServerCAFile, newServerCertFile, newServerCertKey, err := mTLSSetup(utils.ServerCA) + assert.NoError(err) + // Create CertPools and tls.Configs for both CAs + initialCaCert, err := os.ReadFile(initialServerCAFile) + assert.NoError(err) + initialCertPool := x509.NewCertPool() + initialCertPool.AppendCertsFromPEM(initialCaCert) + initialTlsConfig := &tls.Config{ + RootCAs: initialCertPool, + } + newCaCert, err := os.ReadFile(newServerCAFile) + assert.NoError(err) + newCertPool := x509.NewCertPool() + newCertPool.AppendCertsFromPEM(newCaCert) + newTlsConfig := &tls.Config{ + RootCAs: newCertPool, + } + connStr := fmt.Sprintf("localhost:%v", s.GrpcPort) + + // Set up ratelimit with the initial certificate + s.GrpcServerUseTLS = true + s.GrpcServerTlsCert = initialServerCertFile + s.GrpcServerTlsKey = initialServerCertKey + settings.GrpcServerTlsConfig()(&s) + runner := startTestRunner(t, s) + defer runner.Stop() + + // Ensure TLS validation works with the initial CA in cert pool + t.Run("WithInitialCert", func(t *testing.T) { + conn, err := tls.Dial("tcp", connStr, initialTlsConfig) + assert.NoError(err) + conn.Close() + }) + + // Ensure TLS validation fails with the new CA in cert pool + t.Run("WithNewCertFail", func(t *testing.T) { + conn, err := tls.Dial("tcp", connStr, newTlsConfig) + assert.Error(err) + if err == nil { + conn.Close() + } + }) + + // Replace the initial certificate with the new one + err = os.Rename(newServerCertFile, initialServerCertFile) + assert.NoError(err) + err = os.Rename(newServerCertKey, initialServerCertKey) + assert.NoError(err) + + // Ensure TLS validation works with the new CA in cert pool + t.Run("WithNewCertOK", func(t *testing.T) { + // If this takes longer than 10s, something is probably wrong + wait := 10 + // Track the dial error locally so the assertion below checks the last dial attempt rather than the outer err + var err error + var conn *tls.Conn + for i := 0; i < wait; i++ { + // Ensure the new certificate is being used + conn, err = tls.Dial("tcp", connStr, newTlsConfig) + if err == nil { + conn.Close() + break + } + time.Sleep(1 * time.Second) + } + assert.NoError(err) + }) + + // Ensure TLS validation fails with the initial CA in cert pool + t.Run("WithInitialCertFail", func(t *testing.T) { + conn, err := tls.Dial("tcp", connStr, initialTlsConfig) + assert.Error(err) + if err == nil { + conn.Close() + } + }) + }) +} + func testBasicConfigAuthTLS(perSecond bool, local_cache_size int) func(*testing.T) { s := makeSimpleRedisSettings(16381, 16382, perSecond, local_cache_size) s.RedisTlsConfig = &tls.Config{} From 94096740a25c3f5ad583da0a889f23e374d0e25b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:37:14 -0600 Subject: [PATCH 154/181] Bump go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc (#639) Bumps [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://github.com/open-telemetry/opentelemetry-go) from 1.27.0 to 1.28.0.
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.27.0...v1.28.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index bb08a15b..da22b0e2 100644 --- a/go.mod +++ b/go.mod @@ -22,10 +22,10 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 - go.opentelemetry.io/otel/sdk v1.27.0 + go.opentelemetry.io/otel/sdk v1.28.0 go.opentelemetry.io/otel/trace v1.28.0 golang.org/x/net v0.26.0 google.golang.org/grpc v1.64.0 @@ -52,11 +52,11 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 408a802e..707caa78 100644 --- a/go.sum +++ b/go.sum @@ -131,20 +131,20 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 
h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -222,10 +222,10 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= +google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From dfca2641b88effdeca300741c84cc63fd16f85f5 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 21:44:46 -0600 Subject: [PATCH 155/181] Bump golang from 1.22.4 to 1.22.5 in /examples/xds-sotw-config-server (#647) Bumps golang from 1.22.4 to 1.22.5. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index c92b7fe0..da64e734 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.4@sha256:969349b8121a56d51c74f4c273ab974c15b3a8ae246a5cffc1df7d28b66cf978 AS build +FROM golang:1.22.5@sha256:fcae9e0e7313c6467a7c6632ebb5e5fab99bd39bd5eb6ee34a211353e647827a AS build WORKDIR /xds-server COPY . . From 803b65cc26dca8b2f5564b30e4451a24db9103f5 Mon Sep 17 00:00:00 2001 From: zirain Date: Sat, 3 Aug 2024 06:49:33 +0800 Subject: [PATCH 156/181] chore: run gofumpt (#664) Signed-off-by: zirain --- README.md | 1 + src/config/config_impl.go | 19 +++++++++---------- src/limiter/base_limiter.go | 15 ++++++++++----- src/limiter/cache_key.go | 4 ++-- src/memcached/cache_impl.go | 13 ++++++++----- src/provider/cert_provider.go | 1 - src/redis/driver_impl.go | 3 ++- src/redis/fixed_cache_impl.go | 7 ++++--- src/service/ratelimit.go | 16 ++++++++-------- test/memcached/cache_impl_test.go | 3 ++- test/redis/fixed_cache_impl_test.go | 3 ++- test/server/server_impl_test.go | 4 ++-- 12 files changed, 50 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index e4bc1558..bbfa4347 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,7 @@ - [Memcache](#memcache) - [Custom headers](#custom-headers) - [Tracing](#tracing) +- [TLS](#tls) - [mTLS](#mtls) - [Contact](#contact) diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 7ada07e1..45c276b4 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -77,8 +77,8 @@ var validKeys = map[string]bool{ // @param unlimited supplies whether the rate limit is unlimited // @return the new config entry. 
func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, - unlimited bool, shadowMode bool, name string, replaces []string, detailedMetric bool) *RateLimit { - + unlimited bool, shadowMode bool, name string, replaces []string, detailedMetric bool, +) *RateLimit { return &RateLimit{ FullKey: rlStats.GetKey(), Stats: rlStats, @@ -144,8 +144,7 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p if descriptorConfig.RateLimit != nil { unlimited := descriptorConfig.RateLimit.Unlimited - value, present := - pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)] + value, present := pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)] validUnit := present && value != int32(pb.RateLimitResponse_RateLimit_UNKNOWN) if unlimited { @@ -278,8 +277,8 @@ func (this *rateLimitConfigImpl) Dump() string { } func (this *rateLimitConfigImpl) GetLimit( - ctx context.Context, domain string, descriptor *pb_struct.RateLimitDescriptor) *RateLimit { - + ctx context.Context, domain string, descriptor *pb_struct.RateLimitDescriptor, +) *RateLimit { logger.Debugf("starting get limit lookup") var rateLimit *RateLimit = nil value := this.domains[domain] @@ -420,8 +419,8 @@ func ConfigFileContentToYaml(fileName, content string) *YamlRoot { // @param mergeDomainConfigs defines whether multiple configurations referencing the same domain will be merged or rejected throwing an error. // @return a new config. func NewRateLimitConfigImpl( - configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig { - + configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool, +) RateLimitConfig { ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsManager, mergeDomainConfigs} for _, config := range configs { ret.loadConfig(config) @@ -433,8 +432,8 @@ func NewRateLimitConfigImpl( type rateLimitConfigLoaderImpl struct{} func (this *rateLimitConfigLoaderImpl) Load( - configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig { - + configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool, +) RateLimitConfig { return NewRateLimitConfigImpl(configs, statsManager, mergeDomainConfigs) } diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index b76366cf..18f0b83d 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -33,7 +33,8 @@ type LimitInfo struct { } func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limitAfterIncrease uint32, - nearLimitThreshold uint32, overLimitThreshold uint32) *LimitInfo { + nearLimitThreshold uint32, overLimitThreshold uint32, +) *LimitInfo { return &LimitInfo{ limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease, nearLimitThreshold: nearLimitThreshold, overLimitThreshold: overLimitThreshold, @@ -43,7 +44,8 @@ func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limit // Generates cache keys for given rate limit request. Each cache key is represented by a concatenation of // domain, descriptor and current timestamp. 
func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, - limits []*config.RateLimit, hitsAddend uint32) []CacheKey { + limits []*config.RateLimit, hitsAddend uint32, +) []CacheKey { assert.Assert(len(request.Descriptors) == len(limits)) cacheKeys := make([]CacheKey, len(request.Descriptors)) now := this.timeSource.UnixNow() @@ -79,7 +81,8 @@ func (this *BaseRateLimiter) IsOverLimitThresholdReached(limitInfo *LimitInfo) b // Generates response descriptor status based on cache key, over the limit with local cache, over the limit and // near the limit thresholds. Thresholds are checked in order and are mutually exclusive. func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo, - isOverLimitWithLocalCache bool, hitsAddend uint32) *pb.RateLimitResponse_DescriptorStatus { + isOverLimitWithLocalCache bool, hitsAddend uint32, +) *pb.RateLimitResponse_DescriptorStatus { if key == "" { return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, nil, 0) @@ -140,7 +143,8 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * } func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, - localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager) *BaseRateLimiter { + localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager, +) *BaseRateLimiter { return &BaseRateLimiter{ timeSource: timeSource, JitterRand: jitterRand, @@ -194,7 +198,8 @@ func (this *BaseRateLimiter) increaseShadowModeStats(isOverLimitWithLocalCache b } func (this *BaseRateLimiter) generateResponseDescriptorStatus(responseCode pb.RateLimitResponse_Code, - limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32) *pb.RateLimitResponse_DescriptorStatus { + limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32, +) *pb.RateLimitResponse_DescriptorStatus { if limit != nil { return &pb.RateLimitResponse_DescriptorStatus{ Code: responseCode, diff --git a/src/limiter/cache_key.go b/src/limiter/cache_key.go index 4aeab204..7beadd54 100644 --- a/src/limiter/cache_key.go +++ b/src/limiter/cache_key.go @@ -46,8 +46,8 @@ func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { // @param now supplies the current unix time. // @return CacheKey struct. func (this *CacheKeyGenerator) GenerateCacheKey( - domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) CacheKey { - + domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64, +) CacheKey { if limit == nil { return CacheKey{ Key: "", diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 3b887a60..d4f65f75 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -66,8 +66,8 @@ var _ limiter.RateLimitCache = (*rateLimitMemcacheImpl)(nil) func (this *rateLimitMemcacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, - limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { - + limits []*config.RateLimit, +) []*pb.RateLimitResponse_DescriptorStatus { logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. 
@@ -150,7 +150,8 @@ func (this *rateLimitMemcacheImpl) DoLimit( } func (this *rateLimitMemcacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, isOverLimitWithLocalCache []bool, - limits []*config.RateLimit, hitsAddend uint64) { + limits []*config.RateLimit, hitsAddend uint64, +) { defer this.waitGroup.Done() for i, cacheKey := range cacheKeys { if cacheKey.Key == "" || isOverLimitWithLocalCache[i] { @@ -301,7 +302,8 @@ func runAsync(task func()) { } func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { + expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string, +) limiter.RateLimitCache { return &rateLimitMemcacheImpl{ client: client, timeSource: timeSource, @@ -314,7 +316,8 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan } func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand, - localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager) limiter.RateLimitCache { + localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager, +) limiter.RateLimitCache { return NewRateLimitCacheImpl( CollectStats(newMemcacheFromSettings(s), scope.Scope("memcache")), timeSource, diff --git a/src/provider/cert_provider.go b/src/provider/cert_provider.go index 321a14fa..79349489 100644 --- a/src/provider/cert_provider.go +++ b/src/provider/cert_provider.go @@ -80,7 +80,6 @@ func (p *CertProvider) setupRuntime() { p.rootStore.ScopeWithTags("certs", p.settings.ExtraTags), &loader.DirectoryRefresher{}, loader.IgnoreDotFiles) - if err != nil { logger.Fatalf("Failed to set up goruntime loader: %v", err) } diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 5f70bad7..4b10615a 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -71,7 +71,8 @@ func checkError(err error) { } func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisType, url string, poolSize int, - pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config, healthCheckActiveConnection bool, srv server.Server) Client { + pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config, healthCheckActiveConnection bool, srv server.Server, +) Client { maskedUrl := utils.MaskCredentialsInUrl(url) logger.Warnf("connecting to redis on %s with pool size %d", maskedUrl, poolSize) diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 4ec34b3d..8c551e0c 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -44,8 +44,8 @@ func pipelineAppendtoGet(client Client, pipeline *Pipeline, key string, result * func (this *fixedRateLimitCacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, - limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { - + limits []*config.RateLimit, +) []*pb.RateLimitResponse_DescriptorStatus { logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the RateLimit request. 
@@ -218,7 +218,8 @@ func (this *fixedRateLimitCacheImpl) Flush() {} func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager, - stopCacheKeyIncrementWhenOverlimit bool) limiter.RateLimitCache { + stopCacheKeyIncrementWhenOverlimit bool, +) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 0268f1bc..ea26bf0d 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -177,8 +177,8 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co const MaxUint32 = uint32(1<<32 - 1) func (this *service) shouldRateLimitWorker( - ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse { - + ctx context.Context, request *pb.RateLimitRequest, +) *pb.RateLimitResponse { checkServiceErr(request.Domain != "", "rate limit domain must not be empty") checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty") @@ -258,8 +258,8 @@ func (this *service) rateLimitRemainingHeader(descriptor *pb.RateLimitResponse_D } func (this *service) rateLimitResetHeader( - descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { - + descriptor *pb.RateLimitResponse_DescriptorStatus, +) *core.HeaderValue { return &core.HeaderValue{ Key: this.customHeaderResetHeader, Value: strconv.FormatInt(utils.CalculateReset(&descriptor.CurrentLimit.Unit, this.customHeaderClock).GetSeconds(), 10), @@ -268,8 +268,8 @@ func (this *service) rateLimitResetHeader( func (this *service) ShouldRateLimit( ctx context.Context, - request *pb.RateLimitRequest) (finalResponse *pb.RateLimitResponse, finalError error) { - + request *pb.RateLimitRequest, +) (finalResponse *pb.RateLimitResponse, finalError error) { // Generate trace _, span := tracer.Start(ctx, "ShouldRateLimit Execution", trace.WithAttributes( @@ -317,8 +317,8 @@ func (this *service) GetCurrentConfig() (config.RateLimitConfig, bool) { } func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitConfigProvider, statsManager stats.Manager, - health *server.HealthChecker, clock utils.TimeSource, shadowMode, forceStart bool, healthyWithAtLeastOneConfigLoad bool) RateLimitServiceServer { - + health *server.HealthChecker, clock utils.TimeSource, shadowMode, forceStart bool, healthyWithAtLeastOneConfigLoad bool, +) RateLimitServiceServer { newService := &service{ configLock: sync.RWMutex{}, configUpdateEvent: configProvider.ConfigUpdateEvent(), diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index 14022ecb..15d90fd4 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -183,7 +183,8 @@ func TestMemcachedGetError(t *testing.T) { func testLocalCacheStats(localCacheStats stats.StatGenerator, statsStore stats.Store, sink *common.TestStatSink, expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int, - expectedEntryCount int) func(*testing.T) { + expectedEntryCount int, +) func(*testing.T) { return func(t *testing.T) { localCacheStats.GenerateStats() statsStore.Flush() diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 8c7f3c47..45fe87b2 100644 --- a/test/redis/fixed_cache_impl_test.go +++ 
b/test/redis/fixed_cache_impl_test.go @@ -146,7 +146,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { func testLocalCacheStats(localCacheStats gostats.StatGenerator, statsStore gostats.Store, sink *common.TestStatSink, expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int, - expectedEntryCount int) func(*testing.T) { + expectedEntryCount int, +) func(*testing.T) { return func(t *testing.T) { localCacheStats.GenerateStats() statsStore.Flush() diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go index 9896b798..899f5640 100644 --- a/test/server/server_impl_test.go +++ b/test/server/server_impl_test.go @@ -26,8 +26,8 @@ func assertHttpResponse(t *testing.T, requestBody string, expectedStatusCode int, expectedContentType string, - expectedResponseBody string) { - + expectedResponseBody string, +) { t.Helper() assert := assert.New(t) From 722431e7dd15b2ba10298f834fae14828e575aac Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 09:25:48 -0600 Subject: [PATCH 157/181] Bump golang in /examples/xds-sotw-config-server (#674) Bumps golang from `fcae9e0` to `86a3c48`. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index da64e734..89f5abc4 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.5@sha256:fcae9e0e7313c6467a7c6632ebb5e5fab99bd39bd5eb6ee34a211353e647827a AS build +FROM golang:1.22.5@sha256:86a3c48a61915a8c62c0e1d7594730399caa3feb73655dfe96c7bc17710e96cf AS build WORKDIR /xds-server COPY . . From 701e1c146d8fde337f4425057caea003845202f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 09:26:02 -0600 Subject: [PATCH 158/181] Bump google.golang.org/grpc from 1.64.0 to 1.65.0 (#673) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.64.0 to 1.65.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.64.0...v1.65.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 7 ++++--- go.sum | 14 ++++++++------ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index da22b0e2..f9f6a8e3 100644 --- a/go.mod +++ b/go.mod @@ -28,18 +28,19 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 go.opentelemetry.io/otel/trace v1.28.0 golang.org/x/net v0.26.0 - google.golang.org/grpc v1.64.0 + google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 ) require ( + cel.dev/expr v0.15.0 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect diff --git a/go.sum b/go.sum index 707caa78..0181b8cd 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.15.0 h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w= +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= @@ -17,12 +19,12 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coocood/freecache v1.2.4 h1:UdR6Yz/X1HW4fZOuH0Z94KwG851GWOSknua5VUbb/5M= github.com/coocood/freecache v1.2.4/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -231,8 +233,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac 
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 281d5fa8a71ac239d4118209a6f5a84d45b3cb6e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 09:26:24 -0600 Subject: [PATCH 159/181] Bump go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc (#672) Bumps [go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc](https://github.com/open-telemetry/opentelemetry-go-contrib) from 0.52.0 to 0.53.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.52.0...zpages/v0.53.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f9f6a8e3..37e8e14b 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/mediocregopher/radix/v3 v3.8.1 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 go.opentelemetry.io/otel v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 diff --git a/go.sum b/go.sum index 0181b8cd..49b0ddf7 100644 --- a/go.sum +++ b/go.sum @@ -129,8 +129,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= From 322b220647d9d0681d69396225da8593436d62cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 10:36:06 -0700 Subject: [PATCH 160/181] build(deps): bump actions/upload-artifact from 4.3.4 to 4.3.6 (#679) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.4 to 4.3.6. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/0b2256b8c012f0828dc542b3febcab082c67f72b...834a144ee995460fba8ed112a2fc961b36a5ec5a) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 077182da..c92aabe0 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -56,7 +56,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: SARIF file path: results.sarif From c58b4c83f2a94d663ffe94b149bc76fc100aae9d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 10:37:24 -0700 Subject: [PATCH 161/181] build(deps): bump golang in /examples/xds-sotw-config-server (#676) Bumps golang from 1.22.5 to 1.22.6. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index 89f5abc4..1c5068f9 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.5@sha256:86a3c48a61915a8c62c0e1d7594730399caa3feb73655dfe96c7bc17710e96cf AS build +FROM golang:1.22.6@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa AS build WORKDIR /xds-server COPY . . From 5e3841c8e7cc49105ae1ccad395522d09a09bf09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 10:37:34 -0700 Subject: [PATCH 162/181] build(deps): bump golang from 1.22.5 to 1.22.6 (#677) Bumps golang from 1.22.5 to 1.22.6. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.integration | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1fac024a..6d826d86 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.5@sha256:86a3c48a61915a8c62c0e1d7594730399caa3feb73655dfe96c7bc17710e96cf AS build +FROM golang:1.22.6@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index 20b76c1e..458dce18 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang@sha256:969349b8121a56d51c74f4c273ab974c15b3a8ae246a5cffc1df7d28b66cf978 +FROM golang@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* From ae04944bb1e818639f6ed45ab3133962f7611a9b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 10:37:46 -0700 Subject: [PATCH 163/181] build(deps): bump github/codeql-action from 3.25.15 to 3.26.0 (#678) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.15 to 3.26.0. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/afb54ba388a7dca6ecae48f608c4ff05ff4cc77a...eb055d739abdc2e8de2e5f4ba1a8b246daa779aa) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecard.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index a41c8bb9..1a048251 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -35,14 +35,14 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 + uses: github/codeql-action/init@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 + uses: github/codeql-action/autobuild@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 + uses: github/codeql-action/analyze@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index c92aabe0..138c836a 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@afb54ba388a7dca6ecae48f608c4ff05ff4cc77a # v3.25.15 + uses: github/codeql-action/upload-sarif@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 with: sarif_file: results.sarif From 0e8a4fb38f6b84bc95355f540a28c5e76775d2ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 10:38:15 -0700 Subject: [PATCH 164/181] Bump actions/setup-go from 5.0.1 to 5.0.2 (#666) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.1 to 5.0.2. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/cdcb36043654635271a94b9a6d1392de5bb323a7...0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 2 +- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1a048251..e47692d9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -29,7 +29,7 @@ jobs: uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 3ad1bfba..cdd67434 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -47,7 +47,7 @@ jobs: with: python-version: "3.9" - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: "1.21.5" diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 6cb4f4ca..211a2711 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -35,7 +35,7 @@ jobs: with: python-version: "3.9" - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: "1.21.5" From 26f28d78ccafaefe120cb698279ab2e8c4fc2c4f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 10:38:25 -0700 Subject: [PATCH 165/181] Bump docker/setup-buildx-action from 3.5.0 to 3.6.1 (#667) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.5.0 to 3.6.1. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/aa33708b10e362ff993539393ff100fa93ed6a27...988b5a0280414f521da01fcc63a27aeeb4b104db) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/main.yaml | 2 +- .github/workflows/release.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index cdd67434..d4fabc22 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -26,7 +26,7 @@ jobs: - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: build and push docker image run: | diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 9381ae27..8193b01d 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -25,7 +25,7 @@ jobs: - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: build and push docker image run: | From 3a7d0e03f4ee05cbf65d187b6ac0865dd5c8faaa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 10:38:36 -0700 Subject: [PATCH 166/181] Bump ossf/scorecard-action from 2.3.3 to 2.4.0 (#669) Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.3.3 to 2.4.0. - [Release notes](https://github.com/ossf/scorecard-action/releases) - [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md) - [Commits](https://github.com/ossf/scorecard-action/compare/dc50aa9510b46c811795eb24b2f1ba02a914e534...62b2cac7ed8198b15735ed49ab1e5cf35480ba46) --- updated-dependencies: - dependency-name: ossf/scorecard-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/scorecard.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 138c836a..ea649e37 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -34,7 +34,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3 + uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 with: results_file: results.sarif results_format: sarif From 28b1629a21e885bdd2b527d6a1c1de8483dc47d4 Mon Sep 17 00:00:00 2001 From: zirain Date: Tue, 17 Sep 2024 06:59:27 +0800 Subject: [PATCH 167/181] implement Prometheus sink (#681) Signed-off-by: zirain --- README.md | 104 ++++++++++++++++ go.mod | 8 ++ go.sum | 18 +++ src/service_cmd/runner/runner.go | 39 +++--- src/settings/settings.go | 4 + src/stats/prom/default_mapper.yaml | 89 ++++++++++++++ src/stats/prom/prometheus_sink.go | 160 +++++++++++++++++++++++++ src/stats/prom/prometheus_sink_test.go | 111 +++++++++++++++++ 8 files changed, 517 insertions(+), 16 deletions(-) create mode 100644 src/stats/prom/default_mapper.yaml create mode 100644 src/stats/prom/prometheus_sink.go create mode 100644 src/stats/prom/prometheus_sink_test.go diff --git a/README.md b/README.md index bbfa4347..fff18bae 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,7 @@ - [DogStatsD](#dogstatsd) - [Example](#example) - [Continued example:](#continued-example) + - [Prometheus](#prometheus) - [HTTP Port](#http-port) - [/json endpoint](#json-endpoint) - [Debug Port](#debug-port) @@ -901,6 +902,109 @@ Then, declare additional rules for the `DESCRIPTOR` mogrifier 2. `DOG_STATSD_MOGRIFIER_HITS_NAME`: `ratelimit.service.rate_limit.$3` 3. `DOG_STATSD_MOGRIFIER_HITS_TAGS`: `domain:$1,descriptor:$2` +## Prometheus + +To enable Prometheus integration, set: + +1. `USE_PROMETHEUS`: `true` to use [Prometheus](https://prometheus.io/) +2. `PROMETHEUS_ADDR`: The address to listen on for Prometheus metrics. Defaults to `:9090` +3. `PROMETHEUS_PATH`: The path to listen on for Prometheus metrics. Defaults to `/metrics` +4. `PROMETHEUS_MAPPER_YAML`: The path to the YAML file that defines the mapping from statsd to Prometheus metrics. + +Define the mapping from statsd to Prometheus metrics in a YAML file. +More information about the mapping format is available in the statsd exporter's [Metric Mapping and Configuration](https://github.com/prometheus/statsd_exporter?tab=readme-ov-file#metric-mapping-and-configuration) documentation. +The default mapping is: + +```yaml +mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action.
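+# Worked example (illustrative, taken from this patch's tests): a statsd name such as
+# "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits" matches the
+# two-wildcard total_hits rule below and is exported as the Prometheus histogram
+# ratelimit_service_rate_limit_total_hits with labels {domain="mongo_cps", key1="database_users"}.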
+ - match: "ratelimit.service.rate_limit.*.*.near_limit" + name: "ratelimit_service_rate_limit_near_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.over_limit" + name: "ratelimit_service_rate_limit_over_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.total_hits" + name: "ratelimit_service_rate_limit_total_hits" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.within_limit" + name: "ratelimit_service_rate_limit_within_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + + - match: "ratelimit.service.rate_limit.*.*.*.near_limit" + name: "ratelimit_service_rate_limit_near_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit.service.rate_limit.*.*.*.over_limit" + name: "ratelimit_service_rate_limit_over_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit.service.rate_limit.*.*.*.total_hits" + name: "ratelimit_service_rate_limit_total_hits" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit.service.rate_limit.*.*.*.within_limit" + name: "ratelimit_service_rate_limit_within_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + + - match: "ratelimit.service.call.should_rate_limit.*" + name: "ratelimit_service_should_rate_limit_error" + match_metric_type: counter + labels: + err_type: "$1" + + - match: "ratelimit_server.*.total_requests" + name: "ratelimit_service_total_requests" + match_metric_type: counter + labels: + grpc_method: "$1" + + - match: "ratelimit_server.*.response_time" + name: "ratelimit_service_response_time_seconds" + timer_type: histogram + labels: + grpc_method: "$1" + + - match: "ratelimit.service.config_load_success" + name: "ratelimit_service_config_load_success" + match_metric_type: counter + - match: "ratelimit.service.config_load_error" + name: "ratelimit_service_config_load_error" + match_metric_type: counter + + - match: "ratelimit.service.rate_limit.*.*.*.shadow_mode" + name: "ratelimit_service_rate_limit_shadow_mode" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" +``` + # HTTP Port The ratelimit service listens to HTTP 1.1 (by default on port 8080) with two endpoints: diff --git a/go.mod b/go.mod index 37e8e14b..b7b3051e 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,8 @@ require ( github.com/lyft/goruntime v0.3.0 github.com/lyft/gostats v0.4.14 github.com/mediocregopher/radix/v3 v3.8.1 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/statsd_exporter v0.26.1 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 @@ -37,6 +39,7 @@ require ( cel.dev/expr v0.15.0 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect + github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -44,12 +47,17 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-kit/log v0.2.1 // 
indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect diff --git a/go.sum b/go.sum index 49b0ddf7..b54c5c8b 100644 --- a/go.sum +++ b/go.sum @@ -11,6 +11,8 @@ github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGn github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA= github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -42,7 +44,11 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -51,6 +57,8 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= @@ -101,7 
+109,17 @@ github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 h1:pH+U6p github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/statsd_exporter v0.26.1 h1:ucbIAdPmwAUcA+dU+Opok8Qt81Aw8HanlO+2N/Wjv7w= +github.com/prometheus/statsd_exporter v0.26.1/go.mod h1:XlDdjAmRmx3JVvPPYuFNUg+Ynyb5kR69iPPkQjxXFMk= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index c079182f..ada57757 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -9,25 +9,22 @@ import ( "sync" "time" - "github.com/envoyproxy/ratelimit/src/godogstats" - "github.com/envoyproxy/ratelimit/src/metrics" - "github.com/envoyproxy/ratelimit/src/stats" - "github.com/envoyproxy/ratelimit/src/trace" - - gostats "github.com/lyft/gostats" - "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - + gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" + "github.com/envoyproxy/ratelimit/src/godogstats" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/memcached" + "github.com/envoyproxy/ratelimit/src/metrics" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/server" ratelimit "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats/prom" + "github.com/envoyproxy/ratelimit/src/trace" "github.com/envoyproxy/ratelimit/src/utils" ) @@ -42,14 +39,14 @@ type Runner struct { func NewRunner(s settings.Settings) Runner { var store gostats.Store - if s.DisableStats { + switch { + case s.DisableStats: logger.Info("Stats disabled") store = gostats.NewStore(gostats.NewNullSink(), false) - } else if s.UseDogStatsd { - if s.UseStatsd { - logger.Fatalf("Error: unable to use both stats sink at the same time. Set either USE_DOG_STATSD or USE_STATSD but not both.") + case s.UseDogStatsd: + if s.UseStatsd || s.UsePrometheus { + logger.Fatalf("Error: unable to use more than one stats sink at the same time. 
Set one of USE_DOG_STATSD, USE_STATSD, USE_PROMETHEUS.") } - var err error sink, err := godogstats.NewSink( godogstats.WithStatsdHost(s.StatsdHost), godogstats.WithStatsdPort(s.StatsdPort), @@ -59,10 +56,20 @@ func NewRunner(s settings.Settings) Runner { } logger.Info("Stats initialized for dogstatsd") store = gostats.NewStore(sink, false) - } else if s.UseStatsd { + case s.UseStatsd: + if s.UseDogStatsd || s.UsePrometheus { + logger.Fatalf("Error: unable to use more than one stats sink at the same time. Set one of USE_DOG_STATSD, USE_STATSD, USE_PROMETHEUS.") + } logger.Info("Stats initialized for statsd") store = gostats.NewStore(gostats.NewTCPStatsdSink(gostats.WithStatsdHost(s.StatsdHost), gostats.WithStatsdPort(s.StatsdPort)), false) - } else { + case s.UsePrometheus: + if s.UseDogStatsd || s.UseStatsd { + logger.Fatalf("Error: unable to use more than one stats sink at the same time. Set one of USE_DOG_STATSD, USE_STATSD, USE_PROMETHEUS.") + } + logger.Info("Stats initialized for Prometheus") + store = gostats.NewStore(prom.NewPrometheusSink(prom.WithAddr(s.PrometheusAddr), + prom.WithPath(s.PrometheusPath), prom.WithMapperYamlPath(s.PrometheusMapperYaml)), false) + default: logger.Info("Stats initialized for stdout") store = gostats.NewStore(gostats.NewLoggingSink(), false) } diff --git a/src/settings/settings.go b/src/settings/settings.go index 49da0ea5..9febf7d9 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -87,6 +87,10 @@ type Settings struct { ExtraTags map[string]string `envconfig:"EXTRA_TAGS" default:""` StatsFlushInterval time.Duration `envconfig:"STATS_FLUSH_INTERVAL" default:"10s"` DisableStats bool `envconfig:"DISABLE_STATS" default:"false"` + UsePrometheus bool `envconfig:"USE_PROMETHEUS" default:"false"` + PrometheusAddr string `envconfig:"PROMETHEUS_ADDR" default:":9090"` + PrometheusPath string `envconfig:"PROMETHEUS_PATH" default:"/metrics"` + PrometheusMapperYaml string `envconfig:"PROMETHEUS_MAPPER_YAML" default:""` // Settings for rate limit configuration RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` diff --git a/src/stats/prom/default_mapper.yaml b/src/stats/prom/default_mapper.yaml new file mode 100644 index 00000000..df30b1dc --- /dev/null +++ b/src/stats/prom/default_mapper.yaml @@ -0,0 +1,89 @@ +# Requires statsd exporter >= v0.6.0 since it uses the "drop" action. 
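+# Note: this file is embedded into the ratelimit binary via go:embed (the
+# defaultMapper variable in prometheus_sink.go below) and serves as the
+# fallback mapping whenever PROMETHEUS_MAPPER_YAML is not set.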
+mappings: + - match: "ratelimit.service.rate_limit.*.*.near_limit" + name: "ratelimit_service_rate_limit_near_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.over_limit" + name: "ratelimit_service_rate_limit_over_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.total_hits" + name: "ratelimit_service_rate_limit_total_hits" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.within_limit" + name: "ratelimit_service_rate_limit_within_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + + - match: "ratelimit.service.rate_limit.*.*.*.near_limit" + name: "ratelimit_service_rate_limit_near_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit.service.rate_limit.*.*.*.over_limit" + name: "ratelimit_service_rate_limit_over_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit.service.rate_limit.*.*.*.total_hits" + name: "ratelimit_service_rate_limit_total_hits" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit.service.rate_limit.*.*.*.within_limit" + name: "ratelimit_service_rate_limit_within_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + + - match: "ratelimit.service.call.should_rate_limit.*" + name: "ratelimit_service_should_rate_limit_error" + match_metric_type: counter + labels: + err_type: "$1" + + - match: "ratelimit_server.*.total_requests" + name: "ratelimit_service_total_requests" + match_metric_type: counter + labels: + grpc_method: "$1" + + - match: "ratelimit_server.*.response_time" + name: "ratelimit_service_response_time_seconds" + timer_type: histogram + labels: + grpc_method: "$1" + + - match: "ratelimit.service.config_load_success" + name: "ratelimit_service_config_load_success" + match_metric_type: counter + + - match: "ratelimit.service.config_load_error" + name: "ratelimit_service_config_load_error" + match_metric_type: counter + + - match: "ratelimit.service.rate_limit.*.*.*.shadow_mode" + name: "ratelimit_service_rate_limit_shadow_mode" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" diff --git a/src/stats/prom/prometheus_sink.go b/src/stats/prom/prometheus_sink.go new file mode 100644 index 00000000..58b27ece --- /dev/null +++ b/src/stats/prom/prometheus_sink.go @@ -0,0 +1,160 @@ +package prom + +import ( + _ "embed" + "net/http" + + "github.com/go-kit/log" + gostats "github.com/lyft/gostats" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/exporter" + "github.com/prometheus/statsd_exporter/pkg/mapper" + "github.com/sirupsen/logrus" +) + +var ( + //go:embed default_mapper.yaml + defaultMapper string + _ gostats.Sink = &prometheusSink{} + + eventsActions = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_actions_total", + Help: "The total number of StatsD events by action.", + }, + []string{"action"}, + ) + eventsUnmapped = promauto.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_unmapped_total", + Help: "The total number of StatsD events no mapping was found for.", + }) + 
metricsCount = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "statsd_exporter_metrics_total", + Help: "The total number of metrics.", + }, + []string{"type"}, + ) + conflictingEventStats = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_conflict_total", + Help: "The total number of StatsD events with conflicting names.", + }, + []string{"type"}, + ) + eventStats = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_total", + Help: "The total number of StatsD events seen.", + }, + []string{"type"}, + ) + errorEventStats = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_error_total", + Help: "The total number of StatsD events discarded due to errors.", + }, + []string{"reason"}, + ) +) + +type prometheusSink struct { + config struct { + addr string + path string + mapperYamlPath string + } + mapper *mapper.MetricMapper + events chan event.Events + exp *exporter.Exporter +} + +type prometheusSinkOption func(sink *prometheusSink) + +func WithAddr(addr string) prometheusSinkOption { + return func(sink *prometheusSink) { + sink.config.addr = addr + } +} + +func WithPath(path string) prometheusSinkOption { + return func(sink *prometheusSink) { + sink.config.path = path + } +} + +func WithMapperYamlPath(mapperYamlPath string) prometheusSinkOption { + return func(sink *prometheusSink) { + sink.config.mapperYamlPath = mapperYamlPath + } +} + +// NewPrometheusSink returns a Sink that exports stats to Prometheus, mapping statsd-style names via an embedded statsd_exporter and serving them over HTTP. +func NewPrometheusSink(opts ...prometheusSinkOption) gostats.Sink { + promRegistry := prometheus.DefaultRegisterer + sink := &prometheusSink{ + events: make(chan event.Events), + mapper: &mapper.MetricMapper{ + Registerer: promRegistry, + }, + } + for _, opt := range opts { + opt(sink) + } + if sink.config.addr == "" { + sink.config.addr = ":9090" + } + if sink.config.path == "" { + sink.config.path = "/metrics" + } + http.Handle(sink.config.path, promhttp.Handler()) + go func() { + logrus.Infof("Starting prometheus sink on %s%s", sink.config.addr, sink.config.path) + _ = http.ListenAndServe(sink.config.addr, nil) + }() + if sink.config.mapperYamlPath != "" { + _ = sink.mapper.InitFromFile(sink.config.mapperYamlPath) + } else { + _ = sink.mapper.InitFromYAMLString(defaultMapper) + } + + sink.exp = exporter.NewExporter(promRegistry, + sink.mapper, log.NewNopLogger(), + eventsActions, eventsUnmapped, + errorEventStats, eventStats, + conflictingEventStats, metricsCount) + + go func() { + sink.exp.Listen(sink.events) + }() + + return sink +} + +func (s *prometheusSink) FlushCounter(name string, value uint64) { + s.events <- event.Events{&event.CounterEvent{ + CMetricName: name, + CValue: float64(value), + CLabels: make(map[string]string), + }} +} + +func (s *prometheusSink) FlushGauge(name string, value uint64) { + s.events <- event.Events{&event.GaugeEvent{ + GMetricName: name, + GValue: float64(value), + GLabels: make(map[string]string), + }} +} + +func (s *prometheusSink) FlushTimer(name string, value float64) { + s.events <- event.Events{&event.ObserverEvent{ + OMetricName: name, + OValue: value, + OLabels: make(map[string]string), + }} +} diff --git a/src/stats/prom/prometheus_sink_test.go b/src/stats/prom/prometheus_sink_test.go new file mode 100644 index 00000000..cc4b8900 --- /dev/null +++ b/src/stats/prom/prometheus_sink_test.go @@ -0,0 +1,111 @@ +package prom + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + dto
"github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var s = NewPrometheusSink() + +func TestFlushCounter(t *testing.T) { + s.FlushCounter("ratelimit_server.ShouldRateLimit.total_requests", 1) + assert.Eventually(t, func() bool { + metricFamilies, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + metrics := make(map[string]*dto.MetricFamily) + for _, metricFamily := range metricFamilies { + metrics[*metricFamily.Name] = metricFamily + } + + m, ok := metrics["ratelimit_service_total_requests"] + require.True(t, ok) + require.Len(t, m.Metric, 1) + require.Equal(t, map[string]string{ + "grpc_method": "ShouldRateLimit", + }, toMap(m.Metric[0].Label)) + require.Equal(t, 1.0, *m.Metric[0].Counter.Value) + return true + }, time.Second, time.Millisecond) +} + +func toMap(labels []*dto.LabelPair) map[string]string { + m := make(map[string]string) + for _, l := range labels { + m[*l.Name] = *l.Value + } + return m +} + +func TestFlushCounterWithDifferentLabels(t *testing.T) { + s.FlushCounter("ratelimit.service.rate_limit.domain1.key1_val1.over_limit", 1) + s.FlushCounter("ratelimit.service.rate_limit.domain1.key1_val1.key2_val2.over_limit", 2) + assert.Eventually(t, func() bool { + metricFamilies, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + metrics := make(map[string]*dto.MetricFamily) + for _, metricFamily := range metricFamilies { + metrics[*metricFamily.Name] = metricFamily + } + + m, ok := metrics["ratelimit_service_rate_limit_over_limit"] + require.True(t, ok) + require.Len(t, m.Metric, 2) + require.Equal(t, 1.0, *m.Metric[0].Counter.Value) + require.Equal(t, map[string]string{ + "domain": "domain1", + "key1": "key1_val1", + }, toMap(m.Metric[0].Label)) + require.Equal(t, 2.0, *m.Metric[1].Counter.Value) + require.Equal(t, map[string]string{ + "domain": "domain1", + "key1": "key1_val1", + "key2": "key2_val2", + }, toMap(m.Metric[1].Label)) + return true + }, time.Second, time.Millisecond) +} + +func TestFlushGauge(t *testing.T) { + s.FlushGauge("ratelimit.service.rate_limit.domain1.key1.test_gauge", 1) + metricFamilies, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + metrics := make(map[string]*dto.MetricFamily) + for _, metricFamily := range metricFamilies { + metrics[*metricFamily.Name] = metricFamily + } + + _, ok := metrics["ratelimit_service_rate_limit_test_gauge"] + require.False(t, ok) +} + +func TestFlushTimer(t *testing.T) { + s.FlushTimer("ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", 1) + assert.Eventually(t, func() bool { + metricFamilies, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + metrics := make(map[string]*dto.MetricFamily) + for _, metricFamily := range metricFamilies { + metrics[*metricFamily.Name] = metricFamily + } + + m, ok := metrics["ratelimit_service_rate_limit_total_hits"] + require.True(t, ok) + require.Len(t, m.Metric, 1) + require.Equal(t, uint64(1), *m.Metric[0].Histogram.SampleCount) + require.Equal(t, map[string]string{ + "domain": "mongo_cps", + "key1": "database_users", + }, toMap(m.Metric[0].Label)) + require.Equal(t, 1.0, *m.Metric[0].Histogram.SampleSum) + return true + }, time.Second, time.Millisecond) +} From 0e630f156ae7a3d4198daf971af2e6c595f19a70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:14:09 -0800 Subject: [PATCH 168/181] build(deps): bump alpine from 3.20.2 to 3.20.3 
(#701) Bumps alpine from 3.20.2 to 3.20.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 6d826d86..bd9d1569 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.20.2@sha256:0a4eaa0eecf5f8c050e5bba433f58c052be7587ee8af3e8b3910ef9ab5fbe9f5 AS final +FROM alpine:3.20.3@sha256:beefdbd8a1da6d2915566fde36db9db0b524eb737fc57cd1367effd16dc0d06d AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit From ad9ee271438f7ad18e0602691ac880d93e745417 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:54:44 -0800 Subject: [PATCH 169/181] build(deps): bump alpine in /integration-test (#769) Bumps alpine from `0a4eaa0` to `1e42bbe`. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- integration-test/Dockerfile.tester | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-test/Dockerfile.tester b/integration-test/Dockerfile.tester index fdf72745..2380297a 100644 --- a/integration-test/Dockerfile.tester +++ b/integration-test/Dockerfile.tester @@ -1,4 +1,4 @@ -FROM alpine@sha256:0a4eaa0eecf5f8c050e5bba433f58c052be7587ee8af3e8b3910ef9ab5fbe9f5 +FROM alpine@sha256:1e42bbe2508154c9126d48c2b8a75420c3544343bf86fd041fb7527e017a4b4a USER root From 65f8465630b4380137315f407d3df7e8acc358f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:55:55 -0800 Subject: [PATCH 170/181] build(deps): bump alpine from `beefdbd` to `1e42bbe` (#768) Bumps alpine from `beefdbd` to `1e42bbe`. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index bd9d1569..979003cc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,6 +10,6 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.20.3@sha256:beefdbd8a1da6d2915566fde36db9db0b524eb737fc57cd1367effd16dc0d06d AS final +FROM alpine:3.20.3@sha256:1e42bbe2508154c9126d48c2b8a75420c3544343bf86fd041fb7527e017a4b4a AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/ratelimit /bin/ratelimit From 49af5cca965c6881adeb5fa2afc4c156d1fe6bb2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 17:28:46 -0800 Subject: [PATCH 171/181] build(deps): bump golang from 1.22.6 to 1.23.3 (#763) Bumps golang from 1.22.6 to 1.23.3. 
--- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.integration | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 979003cc..4915eaad 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.6@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa AS build +FROM golang:1.23.3@sha256:d56c3e08fe5b27729ee3834854ae8f7015af48fd651cd25d1e3bcf3c19830174 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index 458dce18..adc2c677 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa +FROM golang@sha256:d56c3e08fe5b27729ee3834854ae8f7015af48fd651cd25d1e3bcf3c19830174 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* From 83a4d250397c1203bf2a4c9381ecdb0fb12169ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 17:29:04 -0800 Subject: [PATCH 172/181] build(deps): bump golang in /examples/xds-sotw-config-server (#762) Bumps golang from 1.22.6 to 1.23.3. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index 1c5068f9..5b119007 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.6@sha256:2bd56f00ff47baf33e64eae7996b65846c7cb5e0a46e0a882ef179fd89654afa AS build +FROM golang:1.23.3@sha256:d56c3e08fe5b27729ee3834854ae8f7015af48fd651cd25d1e3bcf3c19830174 AS build WORKDIR /xds-server COPY . . From 15b5ac4b45fbf6fdcb13e109077a103096c9f2e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 00:07:53 -0800 Subject: [PATCH 173/181] build(deps): bump github/codeql-action from 3.26.0 to 3.27.5 (#772) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.26.0 to 3.27.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/eb055d739abdc2e8de2e5f4ba1a8b246daa779aa...f09c1c0a94de965c15400f5634aa42fac8fb8f88) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecard.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index e47692d9..b9ffbc42 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -35,14 +35,14 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 + uses: github/codeql-action/init@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 + uses: github/codeql-action/autobuild@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 + uses: github/codeql-action/analyze@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index ea649e37..bf7313ac 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 + uses: github/codeql-action/upload-sarif@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 with: sarif_file: results.sarif From a53a4fd42543bdd807fd61dcb8fb218f59c3dca3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 09:29:11 -0800 Subject: [PATCH 174/181] build(deps): bump golang from `d56c3e0` to `73f06be` (#771) Bumps golang from `d56c3e0` to `73f06be`. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.integration | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 4915eaad..5130c554 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23.3@sha256:d56c3e08fe5b27729ee3834854ae8f7015af48fd651cd25d1e3bcf3c19830174 AS build +FROM golang:1.23.3@sha256:73f06be4578c9987ce560087e2e2ea6485fb605e3910542cadd8fa09fc5f3e31 AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org diff --git a/Dockerfile.integration b/Dockerfile.integration index adc2c677..5c7cbdb8 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. 
-FROM golang@sha256:d56c3e08fe5b27729ee3834854ae8f7015af48fd651cd25d1e3bcf3c19830174 +FROM golang@sha256:73f06be4578c9987ce560087e2e2ea6485fb605e3910542cadd8fa09fc5f3e31 RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* From b9bb97adb5c47dbb34423a8b7f3f7d0b0b5b1960 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Dec 2024 14:29:45 -0800 Subject: [PATCH 175/181] build(deps): bump golang in /examples/xds-sotw-config-server (#770) Bumps golang from `d56c3e0` to `73f06be`. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/xds-sotw-config-server/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index 5b119007..9225a511 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23.3@sha256:d56c3e08fe5b27729ee3834854ae8f7015af48fd651cd25d1e3bcf3c19830174 AS build +FROM golang:1.23.3@sha256:73f06be4578c9987ce560087e2e2ea6485fb605e3910542cadd8fa09fc5f3e31 AS build WORKDIR /xds-server COPY . . From 738fdce19390fbdbde300939d0d531ee6bce76a0 Mon Sep 17 00:00:00 2001 From: zirain Date: Tue, 3 Dec 2024 15:55:47 +0800 Subject: [PATCH 176/181] fix go-fumpt args (#694) Signed-off-by: zirain --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f23dc042..96124508 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,7 @@ repos: - id: go-imports args: ["-w", "-local", "github.com/envoyproxy/ratelimit"] - id: go-fumpt - args: ["-w"] + args: ["-l", "-w", "."] - repo: https://github.com/pre-commit/mirrors-prettier rev: "v2.4.1" From b2cf3b6cd558c5a26e4f81e79e7ab108a72cfed0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 23:57:54 -0800 Subject: [PATCH 177/181] build(deps): bump actions/setup-python from 5.1.1 to 5.3.0 (#766) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.1.1 to 5.3.0. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/39cd14951b08e74b54015e9e001cdefcf80e669f...0b93645e9fea7318ecaed2b359559ac225c90a2b) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/main.yaml | 2 +- .github/workflows/pullrequest.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index d4fabc22..15a2b2a3 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -43,7 +43,7 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: "3.9" diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 211a2711..73c45a85 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -31,7 +31,7 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: "3.9" From 1818d41e0b6475942c01419e0f41a79f33f86b99 Mon Sep 17 00:00:00 2001 From: zirain Date: Thu, 5 Dec 2024 08:32:39 +0800 Subject: [PATCH 178/181] change to github.com/libp2p/go-reuseport (#774) Signed-off-by: zirain --- go.mod | 6 +++--- go.sum | 4 ++-- src/server/server_impl.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index b7b3051e..91e88fd4 100644 --- a/go.mod +++ b/go.mod @@ -8,17 +8,19 @@ require ( github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 github.com/coocood/freecache v1.2.4 github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b + github.com/go-kit/log v0.2.1 github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/jpillora/backoff v1.0.0 - github.com/kavu/go_reuseport v1.5.0 github.com/kelseyhightower/envconfig v1.4.0 + github.com/libp2p/go-reuseport v0.4.0 github.com/lyft/goruntime v0.3.0 github.com/lyft/gostats v0.4.14 github.com/mediocregopher/radix/v3 v3.8.1 github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_model v0.6.0 github.com/prometheus/statsd_exporter v0.26.1 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 @@ -47,7 +49,6 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -55,7 +56,6 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/stretchr/objx v0.5.2 // indirect diff --git a/go.sum b/go.sum index b54c5c8b..9777a08a 100644 --- a/go.sum +++ b/go.sum @@ -80,8 +80,6 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1 github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= 
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/kavu/go_reuseport v1.5.0 h1:UNuiY2OblcqAtVDE8Gsg1kZz8zbBWg907sP1ceBV+bk= -github.com/kavu/go_reuseport v1.5.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -95,6 +93,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/lyft/goruntime v0.3.0 h1:VLBYR4s3XazkUT8lLtq9CJrt58YmLQQumrK3ktenEkI= github.com/lyft/goruntime v0.3.0/go.mod h1:BW1gngSpMJR9P9w23BPUPdhdbUWhpirl98TQhOWWMF4= github.com/lyft/gostats v0.4.1/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= diff --git a/src/server/server_impl.go b/src/server/server_impl.go index f2341917..e9402da0 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -25,7 +25,7 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/gorilla/mux" - reuseport "github.com/kavu/go_reuseport" + "github.com/libp2p/go-reuseport" gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "google.golang.org/grpc" From d36a71b86354591a9b1df9e850156bd1ac1a1761 Mon Sep 17 00:00:00 2001 From: Rico Pahlisch Date: Fri, 6 Dec 2024 18:23:36 +0100 Subject: [PATCH 179/181] enable rate limit for month and year (#743) Signed-off-by: Rico Pahlisch Signed-off-by: Rico Pahlisch --- api/ratelimit/config/ratelimit/v3/rls_conf.proto | 6 ++++++ src/utils/utilities.go | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/api/ratelimit/config/ratelimit/v3/rls_conf.proto b/api/ratelimit/config/ratelimit/v3/rls_conf.proto index a1b90fc4..cbe313c9 100644 --- a/api/ratelimit/config/ratelimit/v3/rls_conf.proto +++ b/api/ratelimit/config/ratelimit/v3/rls_conf.proto @@ -91,4 +91,10 @@ enum RateLimitUnit { // The time unit representing a day. DAY = 4; + + // The time unit representing a month. + MONTH = 5; + + // The time unit representing a year. 
+ YEAR = 6; } diff --git a/src/utils/utilities.go b/src/utils/utilities.go index abe787c0..98d0b385 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -26,6 +26,10 @@ func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { return 60 * 60 case pb.RateLimitResponse_RateLimit_DAY: return 60 * 60 * 24 + case pb.RateLimitResponse_RateLimit_MONTH: + return 60 * 60 * 24 * 30 + case pb.RateLimitResponse_RateLimit_YEAR: + return 60 * 60 * 24 * 356 } panic("should not get here") From 44de8d982b6e2492952a5b94c1684a2229402aec Mon Sep 17 00:00:00 2001 From: Arko Dasgupta Date: Fri, 6 Dec 2024 10:16:40 -0800 Subject: [PATCH 180/181] Fix time conversion for year (#775) * 365 days in a year, not 356 Signed-off-by: Arko Dasgupta --- src/utils/utilities.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/utils/utilities.go b/src/utils/utilities.go index 98d0b385..83f55979 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -29,7 +29,7 @@ func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { case pb.RateLimitResponse_RateLimit_MONTH: return 60 * 60 * 24 * 30 case pb.RateLimitResponse_RateLimit_YEAR: - return 60 * 60 * 24 * 356 + return 60 * 60 * 24 * 365 } panic("should not get here") From 6a2e8262874f012d08830cc34ba8058e66a33819 Mon Sep 17 00:00:00 2001 From: zirain Date: Wed, 11 Dec 2024 10:56:13 +0800 Subject: [PATCH 181/181] add prometheus sink debug info (#710) Signed-off-by: zirain --- src/stats/prom/prometheus_sink.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/stats/prom/prometheus_sink.go b/src/stats/prom/prometheus_sink.go index 58b27ece..2c93c49c 100644 --- a/src/stats/prom/prometheus_sink.go +++ b/src/stats/prom/prometheus_sink.go @@ -136,6 +136,7 @@ func NewPrometheusSink(opts ...prometheusSinkOption) gostats.Sink { } func (s *prometheusSink) FlushCounter(name string, value uint64) { + logrus.Debugf("FlushCounter: %s %d", name, value) s.events <- event.Events{&event.CounterEvent{ CMetricName: name, CValue: float64(value), @@ -144,6 +145,7 @@ func (s *prometheusSink) FlushCounter(name string, value uint64) { } func (s *prometheusSink) FlushGauge(name string, value uint64) { + logrus.Debugf("FlushGauge: %s %d", name, value) s.events <- event.Events{&event.GaugeEvent{ GMetricName: name, GValue: float64(value), @@ -152,6 +154,7 @@ func (s *prometheusSink) FlushGauge(name string, value uint64) { } func (s *prometheusSink) FlushTimer(name string, value float64) { + logrus.Debugf("FlushTimer: %s %v", name, value) s.events <- event.Events{&event.ObserverEvent{ OMetricName: name, OValue: value,
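
The Debugf lines added in this final patch are silent at logrus's default info level. Below is a minimal, self-contained sketch of exercising them; the prom import path is inferred from the file location src/stats/prom, and the wiring is an illustration, not the project's actual bootstrap code.

package main

import (
	gostats "github.com/lyft/gostats"
	"github.com/sirupsen/logrus"

	"github.com/envoyproxy/ratelimit/src/stats/prom"
)

func main() {
	// The new Debugf calls fire inside FlushCounter/FlushGauge/FlushTimer,
	// but logrus suppresses them unless the global level is raised.
	logrus.SetLevel(logrus.DebugLevel)

	// Back a gostats store with the prometheus sink and flush one counter.
	// Expected debug line, per the format strings in the patch:
	//   FlushCounter: test.total_requests 1
	store := gostats.NewStore(prom.NewPrometheusSink(), false)
	store.NewCounter("test.total_requests").Inc()
	store.Flush()
}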
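
Stepping back to patches 179 and 180: UnitToDivider returns a unit's length in seconds, and the caches integer-divide the current unix time by it to bucket requests into fixed windows. A MONTH is therefore a flat 30 days and a YEAR 365 days (initially mistyped as 356 and corrected in the follow-up), not calendar-aligned periods. A standalone sketch of that arithmetic, with the constants inlined in place of the pb enum values:

package main

import (
	"fmt"
	"time"
)

// Inlined equivalents of UnitToDivider's MONTH and YEAR cases: fixed
// 30-day and 365-day spans, so the windows approximate calendar months
// and years rather than aligning with them.
const (
	monthSeconds = 60 * 60 * 24 * 30  // 2592000
	yearSeconds  = 60 * 60 * 24 * 365 // 31536000
)

func main() {
	now := time.Now().Unix()

	// Requests are bucketed by integer division of unix time: every
	// request whose timestamp lands in the same window shares a bucket,
	// and the counter effectively resets when the window index advances.
	monthWindow := now / monthSeconds
	yearWindow := now / yearSeconds

	fmt.Printf("month window %d began at %s\n",
		monthWindow, time.Unix(monthWindow*monthSeconds, 0).UTC())
	fmt.Printf("year window %d began at %s\n",
		yearWindow, time.Unix(yearWindow*yearSeconds, 0).UTC())
}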
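
Patch 178 swaps github.com/kavu/go_reuseport for github.com/libp2p/go-reuseport. Both packages expose a Listen that sets SO_REUSEPORT (and SO_REUSEADDR) on the socket, so several processes can bind the same address and a replacement server can start listening before the old one fully exits. The hunk above shows only the import swap, so the call below is an assumed usage sketch rather than server_impl.go's actual listener setup:

package main

import (
	"fmt"
	"net/http"

	"github.com/libp2p/go-reuseport"
)

func main() {
	// reuseport.Listen has the same shape in both the old and new package:
	// it returns a net.Listener whose socket has SO_REUSEPORT set, so a
	// second process can bind 127.0.0.1:8080 while this one is running.
	ln, err := reuseport.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}
	fmt.Println("listening on", ln.Addr())

	_ = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
}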