Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix flakey tests with DurationUntilReset. Update docker example to V3 config. #192

Merged
merged 24 commits
Nov 19, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
9908cbe
Add example prom statsd exporter conf
ysawa0 Oct 21, 2020
c90a829
Add example prom statsd exporter conf
ysawa0 Oct 21, 2020
c7cb343
Merge pull request #6 from ysawa0/pull
ysawa0 Oct 21, 2020
2e74762
Merge branch 'master' of https://github.com/ysawa0/ratelimit into doc…
ysawa0 Oct 21, 2020
d920809
Convert example envoy conf to v3
ysawa0 Nov 3, 2020
7edd2e8
Merge branch 'master' of https://github.com/envoyproxy/ratelimit into…
ysawa0 Nov 3, 2020
faf3ff8
Merge branch 'master' of https://github.com/ysawa0/ratelimit into master
ysawa0 Nov 3, 2020
75a308a
Merge branch 'master' of https://github.com/ysawa0/ratelimit into doc…
ysawa0 Nov 3, 2020
559c975
CI
ysawa0 Nov 16, 2020
87ea936
Ensure DurationUntilReset is always the same in integration test
ysawa0 Nov 17, 2020
49f0869
Remove notNils
ysawa0 Nov 17, 2020
cf43c06
fix dur remain
ysawa0 Nov 17, 2020
db30fb8
fix dur remain
ysawa0 Nov 17, 2020
6f6349a
Add test for DurationUntilReset
ysawa0 Nov 17, 2020
2dc3660
durRemain to durRemaining
ysawa0 Nov 17, 2020
d5ec5a8
Add test for duration until reset
ysawa0 Nov 17, 2020
daab215
Add test for duration until reset
ysawa0 Nov 17, 2020
747db97
Add test for duration until reset
ysawa0 Nov 17, 2020
24cb29f
Add test for duration until reset
ysawa0 Nov 17, 2020
bf0a2cd
Add test for duration until reset
ysawa0 Nov 17, 2020
f89e9d7
Add test for duration until reset
ysawa0 Nov 17, 2020
0e82602
Check resp not nil
ysawa0 Nov 17, 2020
eb559d8
Verbiage
ysawa0 Nov 17, 2020
5e50ab8
Fix tests for localcache
ysawa0 Nov 19, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions docker-compose-example.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,19 @@ services:

statsd:
image: prom/statsd-exporter:v0.18.0
entrypoint: /bin/statsd_exporter
command:
- "--statsd.mapping-config=/etc/statsd-exporter/conf.yaml"
expose:
- 9125
- 9102
ports:
- 9125:9125
- 9102:9102 # Visit http://localhost:9102/metrics to see metrics in Prometheus format
networks:
- ratelimit-network
volumes:
- ./examples/prom-statsd-exporter/conf.yaml:/etc/statsd-exporter/conf.yaml

ratelimit:
image: envoyproxy/ratelimit:master
Expand Down
53 changes: 27 additions & 26 deletions examples/envoy/mock.yaml
Original file line number Diff line number Diff line change
@@ -1,31 +1,32 @@
static_resources:
listeners:
- address:
socket_address:
address: 0.0.0.0
port_value: 9999
filter_chains:
- filters:
- name: envoy.http_connection_manager
config:
codec_type: auto
stat_prefix: ingress
route_config:
name: ingress
virtual_hosts:
- name: backend
domains:
- "*"
routes:
- match:
prefix: "/"
direct_response:
status: "200"
body:
inline_string: "Hello World"
http_filters:
- name: envoy.router
config: {}
- address:
socket_address:
address: 0.0.0.0
port_value: 9999
filter_chains:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
codec_type: AUTO
stat_prefix: ingress
route_config:
name: ingress
virtual_hosts:
- name: backend
domains:
- "*"
routes:
- match:
prefix: "/"
direct_response:
status: "200"
body:
inline_string: "Hello World"
http_filters:
- name: envoy.filters.http.router
typed_config: {}
admin:
access_log_path: "/dev/null"
address:
Expand Down
15 changes: 8 additions & 7 deletions examples/envoy/proxy.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,10 @@ static_resources:
port_value: 8888
filter_chains:
- filters:
- name: envoy.http_connection_manager
config:
codec_type: auto
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
codec_type: AUTO
stat_prefix: ingress
http_filters:
- name: envoy.rate_limit
Expand All @@ -57,8 +58,8 @@ static_resources:
grpc_service:
envoy_grpc:
cluster_name: ratelimit
- name: envoy.router
config: {}
- name: envoy.filters.http.router
typed_config: {}
route_config:
name: route
virtual_hosts:
Expand All @@ -72,8 +73,8 @@ static_resources:
cluster: mock
rate_limits:
- actions:
- source_cluster: {}
- destination_cluster: {}
- source_cluster: {} # This action's value is populated by the "service-cluster" arg passed in when starting Envoy. In this example, it's "proxy" (see docker-compose-example.yml)
- destination_cluster: {} # This action's value is populated by the value set in the above "cluster" field -- "mock"
- match:
prefix: /header
route:
Expand Down
67 changes: 67 additions & 0 deletions examples/prom-statsd-exporter/conf.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action.
  # Per-domain rate limit metrics with a single descriptor key.
  # $1 = rate limit domain, $2 = descriptor key.
  - match: "ratelimit.service.rate_limit.*.*.near_limit"
    name: "ratelimit_service_rate_limit_near_limit"
    timer_type: "histogram"
    labels:
      domain: "$1"
      key1: "$2"
  - match: "ratelimit.service.rate_limit.*.*.over_limit"
    name: "ratelimit_service_rate_limit_over_limit"
    timer_type: "histogram"
    labels:
      domain: "$1"
      key1: "$2"
  - match: "ratelimit.service.rate_limit.*.*.total_hits"
    name: "ratelimit_service_rate_limit_total_hits"
    timer_type: "histogram"
    labels:
      domain: "$1"
      key1: "$2"

  # Per-domain rate limit metrics with two descriptor keys.
  # $1 = rate limit domain, $2 = first key, $3 = second key.
  - match: "ratelimit.service.rate_limit.*.*.*.near_limit"
    name: "ratelimit_service_rate_limit_near_limit"
    timer_type: "histogram"
    labels:
      domain: "$1"
      key1: "$2"
      key2: "$3"
  - match: "ratelimit.service.rate_limit.*.*.*.over_limit"
    name: "ratelimit_service_rate_limit_over_limit"
    timer_type: "histogram"
    labels:
      domain: "$1"
      key1: "$2"
      key2: "$3"
  - match: "ratelimit.service.rate_limit.*.*.*.total_hits"
    name: "ratelimit_service_rate_limit_total_hits"
    timer_type: "histogram"
    labels:
      domain: "$1"
      key1: "$2"
      key2: "$3"

  # Errors reported by the ShouldRateLimit call; $1 = error type.
  - match: "ratelimit.service.call.should_rate_limit.*"
    name: "ratelimit_service_should_rate_limit_error"
    match_metric_type: counter
    labels:
      err_type: "$1"

  # Config load outcome counters.
  - match: "ratelimit.service.config_load_success"
    name: "ratelimit_service_config_load_success"
    match_metric_type: counter
  # NOTE(review): the original file declared this config_load_error mapping
  # twice back to back; the redundant duplicate has been removed.
  - match: "ratelimit.service.config_load_error"
    name: "ratelimit_service_config_load_error"
    match_metric_type: counter

  # Catch-all: drop every statsd metric not matched by a rule above so it is
  # not exported to Prometheus.
  - match: "."
    match_type: "regex"
    action: "drop"
    name: "dropped"
49 changes: 31 additions & 18 deletions test/integration/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,26 +14,22 @@ import (
pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
"github.com/envoyproxy/ratelimit/src/service_cmd/runner"
"github.com/envoyproxy/ratelimit/src/utils"
"github.com/envoyproxy/ratelimit/test/common"
"github.com/golang/protobuf/ptypes/duration"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
"google.golang.org/grpc"
)

func newDescriptorStatus(
status pb.RateLimitResponse_Code, requestsPerUnit uint32,
unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32) *pb.RateLimitResponse_DescriptorStatus {
func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32, durRemaining *duration.Duration) *pb.RateLimitResponse_DescriptorStatus {

limit := &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}
sec := utils.UnitToDivider(unit)
now := time.Now().Unix()

return &pb.RateLimitResponse_DescriptorStatus{
Code: status,
CurrentLimit: limit,
LimitRemaining: limitRemaining,
DurationUntilReset: &duration.Duration{Seconds: sec - now%sec},
Code: status,
CurrentLimit: limit,
LimitRemaining: limitRemaining,
DurationUntilReset: &duration.Duration{Seconds: durRemaining.GetSeconds()},
}
}

Expand Down Expand Up @@ -298,12 +294,14 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
response, err = c.ShouldRateLimit(
context.Background(),
common.NewRateLimitRequest("basic", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1))
durRemaining := response.GetStatuses()[0].DurationUntilReset

common.AssertProtoEqual(
assert,
&pb.RateLimitResponse{
OverallCode: pb.RateLimitResponse_OK,
Statuses: []*pb.RateLimitResponse_DescriptorStatus{
newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49)}},
newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining)}},
response)
assert.NoError(err)

Expand Down Expand Up @@ -338,13 +336,14 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
status = pb.RateLimitResponse_OVER_LIMIT
limitRemaining = 0
}
durRemaining = response.GetStatuses()[0].DurationUntilReset

common.AssertProtoEqual(
assert,
&pb.RateLimitResponse{
OverallCode: status,
Statuses: []*pb.RateLimitResponse_DescriptorStatus{
newDescriptorStatus(status, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining)}},
newDescriptorStatus(status, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining, durRemaining)}},
response)
assert.NoError(err)
key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache)))
Expand All @@ -355,7 +354,6 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
} else {
assert.Equal(0, int(key2OverlimitCounter.Value()))
}

key2LocalCacheOverLimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit_with_local_cache", getCacheKey("key2", enable_local_cache)))
if enable_local_cache && i >= 20 {
assert.Equal(i-20, int(key2LocalCacheOverLimitCounter.Value()))
Expand Down Expand Up @@ -402,14 +400,15 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
status = pb.RateLimitResponse_OVER_LIMIT
limitRemaining2 = 0
}

durRemaining1 := response.GetStatuses()[0].DurationUntilReset
durRemaining2 := response.GetStatuses()[1].DurationUntilReset
common.AssertProtoEqual(
assert,
&pb.RateLimitResponse{
OverallCode: status,
Statuses: []*pb.RateLimitResponse_DescriptorStatus{
newDescriptorStatus(pb.RateLimitResponse_OK, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining1),
newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}},
newDescriptorStatus(pb.RateLimitResponse_OK, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining1, durRemaining1),
newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2, durRemaining2)}},
response)
assert.NoError(err)

Expand Down Expand Up @@ -465,8 +464,20 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
} else {
assert.Equal(0, int(localCacheMissCounter.Value()))
}

}

// Test DurationUntilReset by hitting same key twice
resp1, err := c.ShouldRateLimit(
context.Background(),
common.NewRateLimitRequest("another", [][][2]string{{{getCacheKey("key4", enable_local_cache), "durTest"}}}, 1))

time.Sleep(2 * time.Second) // Wait to allow duration to tick down

resp2, err := c.ShouldRateLimit(
context.Background(),
common.NewRateLimitRequest("another", [][][2]string{{{getCacheKey("key4", enable_local_cache), "durTest"}}}, 1))

assert.Less(resp2.GetStatuses()[0].DurationUntilReset.GetSeconds(), resp1.GetStatuses()[0].DurationUntilReset.GetSeconds())
}
}

Expand Down Expand Up @@ -669,12 +680,14 @@ func testConfigReload(grpcPort, perSecond string, local_cache_size string) func(
response, err = c.ShouldRateLimit(
context.Background(),
common.NewRateLimitRequest("reload", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1))

durRemaining := response.GetStatuses()[0].DurationUntilReset
common.AssertProtoEqual(
assert,
&pb.RateLimitResponse{
OverallCode: pb.RateLimitResponse_OK,
Statuses: []*pb.RateLimitResponse_DescriptorStatus{
newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49)}},
newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining)}},
response)
assert.NoError(err)

Expand Down
10 changes: 10 additions & 0 deletions test/integration/runtime/current/ratelimit/config/another.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,3 +19,13 @@ descriptors:
rate_limit:
unit: hour
requests_per_unit: 10

- key: key4
rate_limit:
unit: day
requests_per_unit: 20

- key: key4_local
rate_limit:
unit: day
requests_per_unit: 20
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@ descriptors:
- key: one_per_minute
rate_limit:
unit: minute
requests_per_unit: 1
requests_per_unit: 1