From 21fc4d37b8ca3ce5ceaa3c46705ef7aebaef5907 Mon Sep 17 00:00:00 2001 From: Yuki Sawa Date: Tue, 5 Oct 2021 09:12:42 -0700 Subject: [PATCH] CI: Add pre-commit linters, autoformatters (#299) * Add pre-commit linters, formatters Signed-off-by: Yuki Sawa * add descriptive name for fail msg step Signed-off-by: Yuki Sawa * shorten precommit makefile cmd Signed-off-by: Yuki Sawa * make fix_format Signed-off-by: Yuki Sawa * autoformat all lint errs Signed-off-by: Yuki Sawa * backtick quote to avoid formatter Signed-off-by: Yuki Sawa * add shfmt for bash linting Signed-off-by: Yuki Sawa --- .github/workflows/main.yaml | 20 +++ .github/workflows/pullrequest.yaml | 30 +++- .github/workflows/release.yaml | 4 +- .github/workflows/stale.yml | 64 ++++----- .pre-commit-config.yaml | 31 +++++ .prettierrc.yaml | 1 + CONTRIBUTING.md | 21 ++- Makefile | 12 +- OWNERS.md | 4 +- README.md | 131 ++++++++++-------- docker-compose-example.yml | 12 +- docker-compose.yml | 3 +- examples/prom-statsd-exporter/conf.yaml | 28 ++-- .../docker-compose-integration-test.yml | 12 +- integration-test/run-all.sh | 19 ++- integration-test/scripts/simple-get.sh | 6 +- integration-test/scripts/trigger-ratelimit.sh | 28 ++-- .../scripts/trigger-shadow-mode-key.sh | 24 ++-- requirements-dev.txt | 1 + script/docs_check_format | 8 +- script/docs_fix_format | 2 +- script/lint | 5 +- src/config/config.go | 3 +- src/config/config_impl.go | 4 +- src/config_check_cmd/main.go | 8 +- src/limiter/base_limiter.go | 9 +- src/limiter/cache.go | 3 +- src/limiter/cache_key.go | 4 +- src/memcached/cache_impl.go | 3 +- src/metrics/metrics.go | 3 +- src/redis/cache_impl.go | 1 + src/redis/fixed_cache_impl.go | 5 +- src/server/server_impl.go | 19 ++- src/service/ratelimit.go | 9 +- src/service_cmd/runner/runner.go | 3 +- src/srv/srv.go | 2 - src/stats/manager_impl.go | 3 +- test/common/common.go | 1 - test/config/config_test.go | 6 +- test/config/duplicate_key.yaml | 2 +- test/config/misspelled_key.yaml | 2 +- test/config/misspelled_key2.yaml | 2 +- test/config/non_map_list.yaml | 2 +- test/integration/integration_test.go | 35 +++-- test/limiter/base_limiter_test.go | 9 +- test/memcached/cache_impl_test.go | 53 ++++--- .../memcached/stats_collecting_client_test.go | 5 +- test/metrics/metrics_test.go | 3 +- test/mocks/config/config.go | 6 +- test/mocks/limiter/limiter.go | 6 +- test/mocks/memcached/client.go | 3 +- test/mocks/redis/redis.go | 6 +- test/mocks/rls/rls.go | 3 +- test/mocks/runtime/loader/loader.go | 3 +- test/mocks/runtime/snapshot/snapshot.go | 5 +- test/mocks/stats/manager.go | 3 +- test/mocks/utils/utils.go | 3 +- test/redis/bench_test.go | 6 +- test/redis/driver_impl_test.go | 3 +- test/redis/fixed_cache_impl_test.go | 66 ++++++--- test/server/health_test.go | 4 +- test/server/server_impl_test.go | 10 +- test/service/ratelimit_test.go | 96 ++++++++----- test/srv/srv_test.go | 3 +- 64 files changed, 552 insertions(+), 339 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100644 .prettierrc.yaml create mode 100644 requirements-dev.txt diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 54617022..ae305019 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -12,6 +12,7 @@ jobs: - uses: actions/checkout@v2 - name: check format run: make check_format + build: runs-on: ubuntu-latest steps: @@ -25,3 +26,22 @@ jobs: env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + precommits: + runs-on: ubuntu-latest + + steps: + - 
uses: actions/checkout@v2
+
+      - uses: actions/setup-python@v2
+        with:
+          python-version: "3.9"
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: "1.16"
+
+      - name: run pre-commits
+        run: |
+          make precommit_install
+          pre-commit run -a
diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml
index da03fd13..164a61a4 100644
--- a/.github/workflows/pullrequest.yaml
+++ b/.github/workflows/pullrequest.yaml
@@ -6,10 +6,13 @@ on:
 jobs:
   check:
     runs-on: ubuntu-latest
+
     steps:
       - uses: actions/checkout@v2
+
       - name: check format
         run: make check_format
+
   build:
     runs-on: ubuntu-latest

@@ -17,5 +20,30 @@ jobs:
     steps:
       - uses: actions/checkout@v2

       - name: build and test
+        run: make docker_tests
+
+  precommits:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - uses: actions/setup-python@v2
+        with:
+          python-version: "3.9"
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: "1.16"
+
+      - name: run pre-commits
+        run: |
+          make precommit_install
+          pre-commit run -a
+
+      # If previous stage fails, print resolution steps
+      - if: ${{ failure() }}
+        name: Print resolution steps
         run: |
-          make docker_tests
+          echo "Pre-commits failed! Run 'make precommit_install' then 'pre-commit run -a' to fix."
+          exit 1
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index e2349f99..bcfc82b1 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -3,7 +3,7 @@ name: Build and push :release image
 on:
   push:
     tags:
-      - 'v*'
+      - "v*"

 jobs:
   check:
@@ -23,4 +23,4 @@ jobs:
         make docker_push
       env:
         DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
-        DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
\ No newline at end of file
+        DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 3c332fad..72a49005 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -1,7 +1,7 @@
 on:
   workflow_dispatch:
   schedule:
-    - cron: '0 */4 * * *'
+    - cron: "0 */4 * * *"

 jobs:
   prune_stale:
@@ -9,34 +9,34 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - name: Prune Stale
-        uses: actions/stale@v3.0.14
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          # Different amounts of days for issues/PRs are not currently supported but there is a PR
-          # open for it: https://github.com/actions/stale/issues/214
-          days-before-stale: 30
-          days-before-close: 7
-          stale-issue-message: >
-            This issue has been automatically marked as stale because it has not had activity in the
-            last 30 days. It will be closed in the next 7 days unless it is tagged "help wanted" or "no stalebot" or other activity
-            occurs. Thank you for your contributions.
-          close-issue-message: >
-            This issue has been automatically closed because it has not had activity in the
-            last 37 days. If this issue is still valid, please ping a maintainer and ask them to label it as "help wanted" or "no stalebot".
-            Thank you for your contributions.
-          stale-pr-message: >
-            This pull request has been automatically marked as stale because it has not had
-            activity in the last 30 days. It will be closed in 7 days if no further activity occurs. Please
-            feel free to give a status update now, ping for review, or re-open when it's ready.
-            Thank you for your contributions!
-          close-pr-message: >
-            This pull request has been automatically closed because it has not had
-            activity in the last 37 days. Please feel free to give a status update now, ping for review, or re-open when it's ready.
-            Thank you for your contributions!
- stale-issue-label: 'stale' - exempt-issue-labels: 'no stalebot,help wanted' - stale-pr-label: 'stale' - exempt-pr-labels: 'no stalebot' - operations-per-run: 500 - ascending: true \ No newline at end of file + - name: Prune Stale + uses: actions/stale@v3.0.14 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + # Different amounts of days for issues/PRs are not currently supported but there is a PR + # open for it: https://github.com/actions/stale/issues/214 + days-before-stale: 30 + days-before-close: 7 + stale-issue-message: > + This issue has been automatically marked as stale because it has not had activity in the + last 30 days. It will be closed in the next 7 days unless it is tagged "help wanted" or "no stalebot" or other activity + occurs. Thank you for your contributions. + close-issue-message: > + This issue has been automatically closed because it has not had activity in the + last 37 days. If this issue is still valid, please ping a maintainer and ask them to label it as "help wanted" or "no stalebot". + Thank you for your contributions. + stale-pr-message: > + This pull request has been automatically marked as stale because it has not had + activity in the last 30 days. It will be closed in 7 days if no further activity occurs. Please + feel free to give a status update now, ping for review, or re-open when it's ready. + Thank you for your contributions! + close-pr-message: > + This pull request has been automatically closed because it has not had + activity in the last 37 days. Please feel free to give a status update now, ping for review, or re-open when it's ready. + Thank you for your contributions! + stale-issue-label: "stale" + exempt-issue-labels: "no stalebot,help wanted" + stale-pr-label: "stale" + exempt-pr-labels: "no stalebot" + operations-per-run: 500 + ascending: true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..f23dc042 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,31 @@ +default_language_version: + python: python3 +repos: + - repo: https://github.com/tekwizely/pre-commit-golang + rev: v1.0.0-beta.4 + hooks: + - id: go-imports + args: ["-w", "-local", "github.com/envoyproxy/ratelimit"] + - id: go-fumpt + args: ["-w"] + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v2.4.1" + hooks: + - id: prettier + exclude: "test/config/bad_yaml.yaml" + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-json + - id: check-merge-conflict + - id: end-of-file-fixer + - id: trailing-whitespace + + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 2.1.5 + hooks: + - id: shfmt diff --git a/.prettierrc.yaml b/.prettierrc.yaml new file mode 100644 index 00000000..abeaf738 --- /dev/null +++ b/.prettierrc.yaml @@ -0,0 +1 @@ +singleQuote: false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5d641c98..9d43e10f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,15 +2,26 @@ We welcome contributions from the community. Here are some guidelines. # Coding style -* Ratelimit uses golang's `fmt` too. +- Ratelimit uses [gofumpt](https://github.com/mvdan/gofumpt) and [goimports](https://pkg.go.dev/golang.org/x/tools/cmd/goimports) for Go styling. # Submitting a PR -* Fork the repo and create your PR. -* Tests will automatically run for you. -* When all of the tests are passing, tag @envoyproxy/ratelimit-maintainers and +- Fork the repo. 
+- Before committing any code, install the pre-commits by running:
+
+```bash
+make precommit_install
+# Example usage if you want to run it manually
+pre-commit run # Run against staged changes
+pre-commit run -a # Run against all files
+```
+
+- Pre-commits will automatically format your code at commit time.
+- Create your PR.
+- Tests will automatically run for you.
+- When all of the tests are passing, tag @envoyproxy/ratelimit-maintainers and
   we will review it and merge.
-* Party time.
+- Party time.

 # DCO: Sign your work
diff --git a/Makefile b/Makefile
index 739d8757..55fd1536 100644
--- a/Makefile
+++ b/Makefile
@@ -114,8 +114,14 @@ docker_image: docker_tests
 docker_push: docker_image
 	docker push $(IMAGE):$(VERSION)

-.PHONY: integration-tests
-integration-tests:
+.PHONY: integration_tests
+integration_tests:
 	docker-compose --project-dir $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester
-# docker-compose --project-dir $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester

+.PHONY: precommit_install
+precommit_install:
+	python3 -m pip install -r requirements-dev.txt
+	go install mvdan.cc/gofumpt@v0.1.1
+	go install mvdan.cc/sh/v3/cmd/shfmt@latest
+	go install golang.org/x/tools/cmd/goimports@v0.1.7
+	pre-commit install
diff --git a/OWNERS.md b/OWNERS.md
index ac3a5660..a371de23 100644
--- a/OWNERS.md
+++ b/OWNERS.md
@@ -1,2 +1,2 @@
-* Matt Klein ([mattklein123](https://github.com/mattklein123)) (mklein@lyft.com)
-* Yuki Sawa ([ysawa0](https://github.com/ysawa0)) (yukisawa@gmail.com)
+- Matt Klein ([mattklein123](https://github.com/mattklein123)) (mklein@lyft.com)
+- Yuki Sawa ([ysawa0](https://github.com/ysawa0)) (yukisawa@gmail.com)
diff --git a/README.md b/README.md
index 98861a14..33e367ba 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,5 @@
-**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*

 - [Overview](#overview)
 - [Docker Image](#docker-image)
@@ -57,6 +56,7 @@ reads the configuration from disk via [runtime](https://github.com/lyft/goruntim
 decision is then returned to the caller.

 # Docker Image
+
 For every main commit, an image is pushed to [Dockerhub](https://hub.docker.com/r/envoyproxy/ratelimit/tags?page=1&ordering=last_updated). There is currently no versioning (post v1.4.0) and tags are based on commit sha.

 # Supported Envoy APIs

@@ -69,26 +69,27 @@ Support for [v2 rls proto](https://github.com/envoyproxy/data-plane-api/blob/mas
 1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. Ratelimit has been in production use at Lyft for over 2 years.
 2. `v1.1.0` introduces the data-plane-api proto and initiates the deprecation of the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto).
 3. `e91321b` [commit](https://github.com/envoyproxy/ratelimit/commit/e91321b10f1ad7691d0348e880bd75d0fca05758) deleted support for the legacy [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto).
-The current version of ratelimit protocol is changed to [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto)
-while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is still supported
-as a legacy protocol.
+   The current version of ratelimit protocol is changed to [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto)
+   while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is still supported
+   as a legacy protocol.
 4. `4bb32826` deleted support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto)

 # Building and Testing

-* Install Redis-server.
-* Make sure go is setup correctly and checkout rate limit service into your go path. More information about installing
-go [here](https://golang.org/doc/install).
-* In order to run the integration tests using a local Redis server please run two Redis-server instances: one on port `6379` and another on port `6380`
+- Install Redis-server.
+- Make sure Go is set up correctly and check out the rate limit service into your Go path. More information about installing
+  Go is [here](https://golang.org/doc/install).
+- In order to run the integration tests using a local Redis server, please run two Redis-server instances: one on port `6379` and another on port `6380`

 ```bash
 redis-server --port 6379 &
 redis-server --port 6380 &
 ```

-* To setup for the first time (only done once):
+- To set up for the first time (only done once):

 ```bash
 make bootstrap
 ```

-* To compile:
+- To compile:
+
 ```bash
 make compile
 ```
@@ -99,11 +100,11 @@ go [here](https://golang.org/doc/install).
   GOOS=linux make compile
   ```

-* To compile and run tests:
+- To compile and run tests:

 ```bash
 make tests
 ```

-* To run the server locally using some sensible default settings you can do this (this will setup the server to read the configuration files from the path you specify):
+- To run the server locally using some sensible default settings, you can do this (this will set up the server to read the configuration files from the path you specify):

 ```bash
 USE_STATSD=false LOG_LEVEL=debug REDIS_SOCKET_TYPE=tcp REDIS_URL=localhost:6379 RUNTIME_ROOT=/home/user/src/runtime/data RUNTIME_SUBDIRECTORY=ratelimit
 ```
@@ -124,11 +125,15 @@ the docker-compose.yml file to run a second redis container, and change the envi
 as explained in the [two redis instances](#two-redis-instances) section.

 ## Full test environment
+
 To run a fully configured environment to demo Envoy based rate limiting, run:
+
 ```bash
 docker-compose -f docker-compose-example.yml up --build --remove-orphans
 ```
+
 This will run ratelimit, redis, prom-statsd-exporter and two Envoy containers such that you can demo rate limiting by hitting the below endpoints.
+
 ```bash
 curl localhost:8888/test
 curl localhost:8888/header -H "foo: foo" # Header based
 curl localhost:8888/twoheader -H "foo: foo" -H "bar: bar" # Two headers
 curl localhost:8888/twoheader -H "foo: foo" -H "bar: banned" # Ban a particular header value
 curl localhost:8888/twoheader -H "foo: foo" -H "baz: shady" # This will never be ratelimited since "baz" with value "shady" is in shadow_mode
 curl localhost:8888/twoheader -H "foo: foo" -H "baz: not-so-shady" # This is subject to rate-limiting because it's not in shadow_mode
 ```
+
 Edit `examples/ratelimit/config/example.yaml` to test different rate limit configs. Hot reloading is enabled.

 The descriptors in `example.yaml` and the actions in `examples/envoy/proxy.yaml` should give you a good idea on how to configure rate limits.
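For completeness, here is a minimal Go sketch (an editorial illustration, not part of this patch) that queries the rate limit decision directly over gRPC instead of going through the Envoy proxy. The domain and descriptor values are hypothetical placeholders; substitute whatever `examples/ratelimit/config/example.yaml` actually defines. It assumes the ratelimit gRPC endpoint from the compose file is reachable on `localhost:8081`.

```go
package main

import (
	"context"
	"fmt"
	"log"

	pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
	"google.golang.org/grpc"
)

func main() {
	// Assumed address: the ratelimit gRPC port mapped by the compose file.
	conn, err := grpc.Dial("localhost:8081", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to connect: %v", err)
	}
	defer conn.Close()

	client := pb.NewRateLimitServiceClient(conn)

	// Hypothetical domain/descriptor; mirror what example.yaml actually defines.
	resp, err := client.ShouldRateLimit(context.Background(), &pb.RateLimitRequest{
		Domain: "example-domain",
		Descriptors: []*pb_struct.RateLimitDescriptor{
			{Entries: []*pb_struct.RateLimitDescriptor_Entry{
				{Key: "foo", Value: "foo"},
			}},
		},
		HitsAddend: 1, // count this call as one hit
	})
	if err != nil {
		log.Fatalf("ShouldRateLimit failed: %v", err)
	}
	fmt.Println("overall code:", resp.OverallCode) // OK or OVER_LIMIT
}
```

While the compose environment is up, this should print `OK` until the configured quota is exhausted, then `OVER_LIMIT`.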
To see the metrics in the example + ```bash # The metrics for the shadow_mode keys curl http://localhost:9102/metrics | grep -i shadow @@ -157,7 +164,7 @@ The test suite will spin up a docker-compose environment from `integration-test/ If the test suite fails it will exit with code 1. ```bash -make integration-tests +make integration_tests ``` # Configuration @@ -168,14 +175,14 @@ The rate limit configuration file format is YAML (mainly so that comments are su ### Definitions -* **Domain:** A domain is a container for a set of rate limits. All domains known to the Ratelimit service must be -globally unique. They serve as a way for different teams/projects to have rate limit configurations that don't conflict. -* **Descriptor:** A descriptor is a list of key/value pairs owned by a domain that the Ratelimit service uses to -select the correct rate limit to use when limiting. Descriptors are case-sensitive. Examples of descriptors are: - * ("database", "users") - * ("message_type", "marketing"),("to_number","2061234567") - * ("to_cluster", "service_a") - * ("to_cluster", "service_a"),("from_cluster", "service_b") +- **Domain:** A domain is a container for a set of rate limits. All domains known to the Ratelimit service must be + globally unique. They serve as a way for different teams/projects to have rate limit configurations that don't conflict. +- **Descriptor:** A descriptor is a list of key/value pairs owned by a domain that the Ratelimit service uses to + select the correct rate limit to use when limiting. Descriptors are case-sensitive. Examples of descriptors are: + - ("database", "users") + - ("message_type", "marketing"),("to_number","2061234567") + - ("to_cluster", "service_a") + - ("to_cluster", "service_a"),("from_cluster", "service_b") ### Descriptor list definition @@ -212,6 +219,7 @@ Currently the service supports per second, minute, hour, and day limits. More ty future based on user demand. ### ShadowMode + A shadow_mode key in a rule indicates that whatever the outcome of the evaluation of the rule, the end-result will always be "OK". When a block is in ShadowMode all functions of the rate limiting service are executed as normal, with cache-lookup and statistics @@ -272,7 +280,7 @@ descriptors: In the preceding example, the domain is "messaging" and we setup two different scenarios that illustrate more complex functionality. First, we want to limit on marketing messages to a specific number. To enable this, we make -use of *nested descriptor lists.* The top level descriptor is ("message_type", "marketing"). However this descriptor +use of _nested descriptor lists._ The top level descriptor is ("message_type", "marketing"). However this descriptor does not have a limit assigned so it's just a placeholder. Contained within this entry we have another descriptor list that includes an entry with key "to_number". However, notice that no value is provided. This means that the service will match against any value supplied for "to_number" and generate a unique limit. Thus, ("message_type", "marketing"), @@ -283,7 +291,7 @@ The configuration also sets up another rule without a value. This one creates an any particular number during a 1 day period. Thus, ("to_number", "2061111111") and ("to_number", "2062222222") both get 100 requests per day. -When calling the rate limit service, the client can specify *multiple descriptors* to limit on in a single call. This +When calling the rate limit service, the client can specify _multiple descriptors_ to limit on in a single call. 
This limits round trips and allows limiting on aggregate rule definitions. For example, using the preceding configuration, the client could send this complete request (in pseudo IDL): @@ -294,7 +302,7 @@ RateLimitRequest: descriptor: ("to_number", "2061111111") ``` -And the service will rate limit against *all* matching rules and return an aggregate result; a logical OR of all +And the service will rate limit against _all_ matching rules and return an aggregate result; a logical OR of all the individual rate limit decisions. #### Example 3 @@ -318,11 +326,12 @@ descriptors: ``` In the preceding example, we setup a generic rate limit for individual IP addresses. The architecture's edge proxy can -be configured to make a rate limit service call with the descriptor ("remote_address", "50.0.0.1") for example. This IP would +be configured to make a rate limit service call with the descriptor `("remote_address", "50.0.0.1")` for example. This IP would get 10 requests per second as would any other IP. However, the configuration also contains a second configuration that explicitly defines a -value along with the same key. If the descriptor ("remote_address", "50.0.0.5") is received, the service will -*attempt the most specific match possible*. This means +value along with the same key. +If the descriptor `("remote_address", "50.0.0.5")` is received, the service +will _attempt the most specific match possible_. This means the most specific descriptor at the same level as your request. Thus, key/value is always attempted as a match before just key. #### Example 4 @@ -387,9 +396,9 @@ This can be useful for collecting statistics, or if one wants to define a descri The return value for unlimited descriptors will be an OK status code with the LimitRemaining field set to MaxUint32 value. - ### Example 6 +### Example 6 - A rule using shadow_mode is useful for soft-launching rate limiting. In this example +A rule using shadow_mode is useful for soft-launching rate limiting. In this example ``` RateLimitRequest: @@ -419,7 +428,6 @@ descriptors: unit: second ``` - ## Loading Configuration The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors @@ -437,6 +445,7 @@ RUNTIME_IGNOREDOTFILES default:"false" **Configuration files are loaded from RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/\*.yaml** There are two methods for triggering a configuration reload: + 1. Symlink RUNTIME_ROOT to a different directory. 2. Update the contents inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/` directly. @@ -467,6 +476,7 @@ LOG_FORMAT=json ``` Output example: + ``` {"@message":"loading domain: messaging","@timestamp":"2020-09-10T17:22:44.926010192Z","level":"debug"} {"@message":"loading descriptor: key=messaging.message_type_marketing","@timestamp":"2020-09-10T17:22:44.926019315Z","level":"debug"} @@ -479,10 +489,12 @@ Output example: ``` ## GRPC Keepalive + Client-side GRPC DNS re-resolution in scenarios with auto scaling enabled might not work as expected and the current workaround is to [configure connection keepalive](https://github.com/grpc/grpc/issues/12295#issuecomment-382794204) on server-side. The behavior can be fixed by configuring the following env variables for the ratelimit server: -* `GRPC_MAX_CONNECTION_AGE`: a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. 
-* `GRPC_MAX_CONNECTION_AGE_GRACE`: an additive period after MaxConnectionAge after which the connection will be forcibly closed.
+
+- `GRPC_MAX_CONNECTION_AGE`: a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
+- `GRPC_MAX_CONNECTION_AGE_GRACE`: an additive period after MaxConnectionAge after which the connection will be forcibly closed.

 # Request Fields

@@ -490,11 +502,15 @@ For information on the fields of a Ratelimit gRPC request please read the inform
 on the RateLimitRequest message type in the Ratelimit [proto file.](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/ratelimit/v3/rls.proto)

 # GRPC Client
+
 The [gRPC client](https://github.com/envoyproxy/ratelimit/blob/master/src/client_cmd/main.go) will interact with the ratelimit server and tell you if the requests are over limit.
+
 ## Commandline flags
-* `-dial_string`: used to specify the address of ratelimit server. It defaults to `localhost:8081`.
-* `-domain`: used to specify the domain.
-* `-descriptors`: used to specify one descriptor. You can pass multiple descriptors like following:
+
+- `-dial_string`: used to specify the address of the ratelimit server. It defaults to `localhost:8081`.
+- `-domain`: used to specify the domain.
+- `-descriptors`: used to specify one descriptor. You can pass multiple descriptors like the following:
+
 ```
 go run main.go -domain test \
 -descriptors name=foo,age=14 -descriptors name=bar,age=18
 ```

 # Global ShadowMode

 There is a global shadow-mode which can make it easier to introduce rate limiting into an existing service landscape. It will override whatever result is returned by the regular rate limiting process.

 ## Configuration
+
 The global shadow mode is configured with an environment variable.

 Setting the environment variable `SHADOW_MODE` to `true` will enable the feature.

 ## Statistics

-There is an additional service-level statistics generated that will increment whenever the global shadow mode has overridden a rate limiting result.
+There is an additional service-level statistic generated that will increment whenever the global shadow mode has overridden a rate limiting result.

 # Statistics

 The rate limit service generates various statistics for each configured rate limit rule that will be useful for end
 users both for visibility and for setting alarms. Ratelimit uses gostats as its statistics library. Please refer to
 [gostats documentation](https://godoc.org/github.com/lyft/gostats) for more information on the library.

 Rate Limit Statistic Path:

 ```
 ratelimit.service.rate_limit.DOMAIN.KEY_VALUE.STAT
 ```

 DOMAIN:
-* As specified in the domain value in the YAML runtime file
+
+- As specified in the domain value in the YAML runtime file

 KEY_VALUE:
-* A combination of the key value
-* Nested descriptors would be suffixed in the stats path
+
+- A combination of the key value
+- Nested descriptors would be suffixed in the stats path

 STAT:
-* near_limit: Number of rule hits over the NearLimit ratio threshold (currently 80%) but under the threshold rate.
-* over_limit: Number of rule hits exceeding the threshold rate
-* total_hits: Number of rule hits in total
-* shadow_mode: Number of rule hits where shadow_mode would trigger and override the over_limit result
+
+- near_limit: Number of rule hits over the NearLimit ratio threshold (currently 80%) but under the threshold rate.
+- over_limit: Number of rule hits exceeding the threshold rate
+- total_hits: Number of rule hits in total
+- shadow_mode: Number of rule hits where shadow_mode would trigger and override the over_limit result

 To use a custom near_limit ratio threshold, you can specify it with the `NEAR_LIMIT_RATIO` environment variable. It defaults to `0.8` (0-1 scale).
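To make the near-limit threshold concrete, here is a small worked sketch (an editorial illustration; `classifyHit` is a hypothetical helper, not the service's actual code) that buckets a hit the way the stat definitions above describe:

```go
package main

import "fmt"

// classifyHit buckets a single hit: hits above the near-limit threshold
// but still within the limit count as near_limit; hits beyond the limit
// count as over_limit; everything else is within_limit.
func classifyHit(hitNumber, requestsPerUnit uint32, nearLimitRatio float64) string {
	nearLimitThreshold := uint32(float64(requestsPerUnit) * nearLimitRatio)
	switch {
	case hitNumber > requestsPerUnit:
		return "over_limit"
	case hitNumber > nearLimitThreshold:
		return "near_limit"
	default:
		return "within_limit"
	}
}

func main() {
	// With a 20/minute limit and the default 0.8 ratio, the threshold is 16:
	// hits 17-20 count as near_limit, and hit 21 onwards as over_limit.
	for _, hit := range []uint32{16, 17, 20, 21} {
		fmt.Printf("hit %d: %s\n", hit, classifyHit(hit, 20, 0.8))
	}
}
```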
These are examples of generated stats for some configured rate limit rules from the above examples: @@ -559,28 +579,29 @@ ratelimit.service.rate_limit.messaging.auth-service.over_limit.shadow_mode: 1 # HTTP Port The ratelimit service listens to HTTP 1.1 (by default on port 8080) with two endpoints: + 1. /healthcheck → return a 200 if this service is healthy 1. /json → HTTP 1.1 endpoint for interacting with ratelimit service ## /json endpoint Takes an HTTP POST with a JSON body of the form e.g. + ```json { "domain": "dummy", "descriptors": [ - {"entries": [ - {"key": "one_per_day", - "value": "something"} - ]} + { "entries": [{ "key": "one_per_day", "value": "something" }] } ] } ``` + The service will return an http 200 if this request is allowed (if no ratelimits exceeded) or 429 if one or more ratelimits were exceeded. The response is a RateLimitResponse encoded with [proto3-to-json mapping](https://developers.google.com/protocol-buffers/docs/proto3#json): + ```json { "overallCode": "OVER_LIMIT", @@ -656,10 +677,10 @@ By default, for each request, ratelimit will pick up a connection from pool, wri For high throughput scenarios, ratelimit also support [implicit pipelining](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L238) . It can be configured using the following environment variables: -1. `REDIS_PIPELINE_WINDOW` & `REDIS_PERSECOND_PIPELINE_WINDOW`: sets the duration after which internal pipelines will be flushed. -If window is zero then implicit pipelining will be disabled. +1. `REDIS_PIPELINE_WINDOW` & `REDIS_PERSECOND_PIPELINE_WINDOW`: sets the duration after which internal pipelines will be flushed. + If window is zero then implicit pipelining will be disabled. 1. `REDIS_PIPELINE_LIMIT` & `REDIS_PERSECOND_PIPELINE_LIMIT`: sets maximum number of commands that can be pipelined before flushing. -If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. + If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. `implicit pipelining` is disabled by default. To enable it, you can use default values [used by radix](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L278) and tune for the optimal value. @@ -713,9 +734,11 @@ When using multiple memcache nodes in `MEMCACHE_HOST_PORT=`, one should provide to all ratelimiter instances to ensure that a particular cache key is always hashed to the same memcache node. # Custom headers + Ratelimit service can be configured to return custom headers with the ratelimit information. It will populate the response_headers_to_add as part of the [RateLimitResponse](https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/ratelimit/v3/rls.proto#service-ratelimit-v3-ratelimitresponse). The following environment variables control the custom response feature: + 1. `LIMIT_RESPONSE_HEADERS_ENABLED` - Enables the custom response headers 1. `LIMIT_LIMIT_HEADER` - The default value is "RateLimit-Limit", setting the environment variable will specify an alternative header name 1. 
`LIMIT_REMAINING_HEADER` - The default value is "RateLimit-Remaining", setting the environment variable will specify an alternative header name @@ -723,13 +746,13 @@ The following environment variables control the custom response feature: # Contact -* [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing +- [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing list where we will email announcements only. -* [envoy-users](https://groups.google.com/forum/#!forum/envoy-users): General user discussion. +- [envoy-users](https://groups.google.com/forum/#!forum/envoy-users): General user discussion. Please add `[ratelimit]` to the email subject. -* [envoy-dev](https://groups.google.com/forum/#!forum/envoy-dev): Envoy developer discussion (APIs, +- [envoy-dev](https://groups.google.com/forum/#!forum/envoy-dev): Envoy developer discussion (APIs, feature design, etc.). Please add `[ratelimit]` to the email subject. -* [Slack](https://envoyproxy.slack.com/): Slack, to get invited go [here](http://envoyslack.cncf.io). +- [Slack](https://envoyproxy.slack.com/): Slack, to get invited go [here](http://envoyslack.cncf.io). We have the IRC/XMPP gateways enabled if you prefer either of those. Once an account is created, connection instructions for IRC/XMPP can be found [here](https://envoyproxy.slack.com/account/gateways). The `#ratelimit-users` channel is used for discussions about the ratelimit service. diff --git a/docker-compose-example.yml b/docker-compose-example.yml index 9739feaa..ba4cc69e 100644 --- a/docker-compose-example.yml +++ b/docker-compose-example.yml @@ -65,11 +65,11 @@ services: networks: - ratelimit-network expose: - - "8888" - - "8001" + - "8888" + - "8001" ports: - - "8888:8888" - - "8001:8001" + - "8888:8888" + - "8001:8001" envoy-mock: image: envoyproxy/envoy-dev:latest @@ -86,9 +86,9 @@ services: networks: - ratelimit-network expose: - - "9999" + - "9999" ports: - - "9999:9999" + - "9999:9999" networks: ratelimit-network: diff --git a/docker-compose.yml b/docker-compose.yml index 88d3a86e..8271245e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,5 @@ version: "3" services: - redis: image: redis:alpine expose: @@ -39,7 +38,7 @@ services: ratelimit: image: alpine:3.6 command: > - sh -c "until test -f /usr/local/bin/ratelimit; do sleep 5; done; /usr/local/bin/ratelimit" + sh -c "until test -f /usr/local/bin/ratelimit; do sleep 5; done; /usr/local/bin/ratelimit" ports: - 8080:8080 - 8081:8081 diff --git a/examples/prom-statsd-exporter/conf.yaml b/examples/prom-statsd-exporter/conf.yaml index f6649c2f..31f16dd6 100644 --- a/examples/prom-statsd-exporter/conf.yaml +++ b/examples/prom-statsd-exporter/conf.yaml @@ -1,59 +1,51 @@ mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action. 
- - match: - "ratelimit.service.rate_limit.*.*.near_limit" + - match: "ratelimit.service.rate_limit.*.*.near_limit" name: "ratelimit_service_rate_limit_near_limit" timer_type: "histogram" labels: domain: "$1" key1: "$2" - - match: - "ratelimit.service.rate_limit.*.*.over_limit" + - match: "ratelimit.service.rate_limit.*.*.over_limit" name: "ratelimit_service_rate_limit_over_limit" timer_type: "histogram" labels: domain: "$1" key1: "$2" - - match: - "ratelimit.service.rate_limit.*.*.total_hits" + - match: "ratelimit.service.rate_limit.*.*.total_hits" name: "ratelimit_service_rate_limit_total_hits" timer_type: "histogram" labels: domain: "$1" key1: "$2" - - match: - "ratelimit.service.rate_limit.*.*.within_limit" + - match: "ratelimit.service.rate_limit.*.*.within_limit" name: "ratelimit_service_rate_limit_within_limit" timer_type: "histogram" labels: domain: "$1" key1: "$2" - - match: - "ratelimit.service.rate_limit.*.*.*.near_limit" + - match: "ratelimit.service.rate_limit.*.*.*.near_limit" name: "ratelimit_service_rate_limit_near_limit" timer_type: "histogram" labels: domain: "$1" key1: "$2" key2: "$3" - - match: - "ratelimit.service.rate_limit.*.*.*.over_limit" + - match: "ratelimit.service.rate_limit.*.*.*.over_limit" name: "ratelimit_service_rate_limit_over_limit" timer_type: "histogram" labels: domain: "$1" key1: "$2" key2: "$3" - - match: - "ratelimit.service.rate_limit.*.*.*.total_hits" + - match: "ratelimit.service.rate_limit.*.*.*.total_hits" name: "ratelimit_service_rate_limit_total_hits" timer_type: "histogram" labels: domain: "$1" key1: "$2" key2: "$3" - - match: - "ratelimit.service.rate_limit.*.*.*.within_limit" + - match: "ratelimit.service.rate_limit.*.*.*.within_limit" name: "ratelimit_service_rate_limit_within_limit" timer_type: "histogram" labels: @@ -86,8 +78,7 @@ mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action. name: "ratelimit_service_config_load_error" match_metric_type: counter - - match: - "ratelimit.service.rate_limit.*.*.*.shadow_mode" + - match: "ratelimit.service.rate_limit.*.*.*.shadow_mode" name: "ratelimit_service_rate_limit_shadow_mode" timer_type: "histogram" labels: @@ -95,7 +86,6 @@ mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action. key1: "$2" key2: "$3" - # Enable below in production once you have the metrics you need # - match: "." # match_type: "regex" diff --git a/integration-test/docker-compose-integration-test.yml b/integration-test/docker-compose-integration-test.yml index 7cfd2eb9..d37264cb 100644 --- a/integration-test/docker-compose-integration-test.yml +++ b/integration-test/docker-compose-integration-test.yml @@ -69,11 +69,11 @@ services: networks: - ratelimit-network expose: - - "8888" - - "8001" + - "8888" + - "8001" ports: - - "8888:8888" - - "8001:8001" + - "8888:8888" + - "8001:8001" envoy-mock: image: envoyproxy/envoy-dev:latest @@ -90,9 +90,9 @@ services: networks: - ratelimit-network expose: - - "9999" + - "9999" ports: - - "9999:9999" + - "9999:9999" tester: build: diff --git a/integration-test/run-all.sh b/integration-test/run-all.sh index 58987603..fe8c94ee 100755 --- a/integration-test/run-all.sh +++ b/integration-test/run-all.sh @@ -3,13 +3,12 @@ echo "Running tests" FILES=/test/scripts/* -for f in $FILES -do - echo "Processing $f file..." - # take action on each file. $f store current file name - $f - if [ $? -ne 0 ] ; then - echo "Failed file $f" - exit 1 - fi -done \ No newline at end of file +for f in $FILES; do + echo "Processing $f file..." 
+ # take action on each file. $f store current file name + $f + if [ $? -ne 0 ]; then + echo "Failed file $f" + exit 1 + fi +done diff --git a/integration-test/scripts/simple-get.sh b/integration-test/scripts/simple-get.sh index 54233885..74466c50 100755 --- a/integration-test/scripts/simple-get.sh +++ b/integration-test/scripts/simple-get.sh @@ -3,6 +3,6 @@ # Just happy path curl -s -f -H "foo: test" -H "baz: shady" http://envoy-proxy:8888/twoheader -if [ $? -ne 0 ] ; then - exit 1 -fi \ No newline at end of file +if [ $? -ne 0 ]; then + exit 1 +fi diff --git a/integration-test/scripts/trigger-ratelimit.sh b/integration-test/scripts/trigger-ratelimit.sh index 49b36ada..57f42080 100755 --- a/integration-test/scripts/trigger-ratelimit.sh +++ b/integration-test/scripts/trigger-ratelimit.sh @@ -5,24 +5,24 @@ # Has rate limit quota 3 req / min # -response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader` -response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader` -response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader` +response=$(curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader) +response=$(curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader) +response=$(curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader) -if [ $? -ne 0 ] ; then - echo "Rate limit should not trigger yet" - exit 1 +if [ $? -ne 0 ]; then + echo "Rate limit should not trigger yet" + exit 1 fi -response=`curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader` +response=$(curl -f -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader) -if [ $? -eq 0 ] ; then - echo "Rate limiting should fail the request" - exit 1 +if [ $? -eq 0 ]; then + echo "Rate limiting should fail the request" + exit 1 fi -response=`curl -i -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader | grep "Too Many Requests"` -if [ $? -ne 0 ] ; then - echo "This should trigger a ratelimit" - exit 1 +response=$(curl -i -s -H "foo: pelle" -H "baz: not-so-shady" http://envoy-proxy:8888/twoheader | grep "Too Many Requests") +if [ $? -ne 0 ]; then + echo "This should trigger a ratelimit" + exit 1 fi diff --git a/integration-test/scripts/trigger-shadow-mode-key.sh b/integration-test/scripts/trigger-shadow-mode-key.sh index b72ec7d5..3204284e 100755 --- a/integration-test/scripts/trigger-shadow-mode-key.sh +++ b/integration-test/scripts/trigger-shadow-mode-key.sh @@ -6,20 +6,20 @@ # shadow_mode is true # -response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` -response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` -response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` -response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` -response=`curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader` +response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader) +response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader) +response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader) +response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader) +response=$(curl -f -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader) -if [ $? 
-ne 0 ] ; then - echo "Shadow Mode key should not trigger an error, even if we have exceeded the quota" - exit 1 +if [ $? -ne 0 ]; then + echo "Shadow Mode key should not trigger an error, even if we have exceeded the quota" + exit 1 fi -remaining=`curl -i -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader | grep x-ratelimit-remaining | cut -d: -f2 | cut -d: -f2 | sed 's/ //g'` +remaining=$(curl -i -s -H "foo: pelle" -H "baz: shady" http://envoy-proxy:8888/twoheader | grep x-ratelimit-remaining | cut -d: -f2 | cut -d: -f2 | sed 's/ //g') -if [ $remaining == "0" ] ; then - echo "Remaining should be zero" - exit 1 +if [ $remaining == "0" ]; then + echo "Remaining should be zero" + exit 1 fi diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 00000000..7a82dfc2 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1 @@ +pre-commit>=2.9.3 diff --git a/script/docs_check_format b/script/docs_check_format index 3fb4faf5..6891f0a4 100755 --- a/script/docs_check_format +++ b/script/docs_check_format @@ -1,9 +1,9 @@ #!/bin/bash if ! npm list -g | grep -q doctoc; then - npm install -g doctoc + npm install -g doctoc fi -TEMP_DIR=`mktemp -d` +TEMP_DIR=$(mktemp -d) cp README.md $TEMP_DIR/ doctoc $TEMP_DIR @@ -13,6 +13,6 @@ DIFF_RESULT=$? rm -fr $TEMP_DIR if [[ $DIFF_RESULT != 0 ]]; then - echo "doc formatting is invalid. run make fix_format" - exit 1 + echo "doc formatting is invalid. run make fix_format" + exit 1 fi diff --git a/script/docs_fix_format b/script/docs_fix_format index c0c1da3e..e8be01b6 100755 --- a/script/docs_fix_format +++ b/script/docs_fix_format @@ -1,6 +1,6 @@ #!/bin/bash if ! npm list -g | grep -q doctoc; then - npm install -g doctoc + npm install -g doctoc fi doctoc README.md diff --git a/script/lint b/script/lint index 564b8860..353c4047 100755 --- a/script/lint +++ b/script/lint @@ -1,6 +1,5 @@ #!/usr/bin/env bash -if [ ! -z "$SRCPATH" ] -then +if [ ! -z "$SRCPATH" ]; then cd $SRCPATH export PATH=/go/bin:$PATH fi @@ -9,7 +8,7 @@ PKGS=$(find . -maxdepth 3 -type d | sed s/\.\\/// | grep -vE '.git|\.|script|ven LINT_PKGS=$(echo ${PKGS} | grep -v 'tests/generated') for pkg in $LINT_PKGS; do - golint $pkg | grep -v comment + golint $pkg | grep -v comment done go vet $(glide nv) diff --git a/src/config/config.go b/src/config/config.go index d174f100..d6f7e530 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -3,8 +3,9 @@ package config import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/stats" "golang.org/x/net/context" + + "github.com/envoyproxy/ratelimit/src/stats" ) // Errors that may be raised during config parsing. diff --git a/src/config/config_impl.go b/src/config/config_impl.go index aca94425..ec3fa4a6 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -6,10 +6,11 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/stats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" "gopkg.in/yaml.v2" + + "github.com/envoyproxy/ratelimit/src/stats" ) type yamlRateLimit struct { @@ -96,7 +97,6 @@ func newRateLimitConfigError(config RateLimitConfigToLoad, err string) RateLimit // @param descriptors supplies the YAML descriptors to load. // @param statsManager that owns the stats.Scope. 
func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, parentKey string, descriptors []yamlDescriptor, statsManager stats.Manager) { - for _, descriptorConfig := range descriptors { if descriptorConfig.Key == "" { panic(newRateLimitConfigError(config, "descriptor has empty key")) diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index e451694c..0c7480e2 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -3,14 +3,16 @@ package main import ( "flag" "fmt" - "github.com/envoyproxy/ratelimit/src/settings" - "github.com/envoyproxy/ratelimit/src/stats" "io/ioutil" "os" "path/filepath" - "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" + gostats "github.com/lyft/gostats" + + "github.com/envoyproxy/ratelimit/src/config" ) func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index cbdde93a..f1a7de22 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -6,11 +6,12 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + logger "github.com/sirupsen/logrus" + "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/stats" "github.com/envoyproxy/ratelimit/src/utils" - logger "github.com/sirupsen/logrus" ) type BaseRateLimiter struct { @@ -33,8 +34,10 @@ type LimitInfo struct { func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limitAfterIncrease uint32, nearLimitThreshold uint32, overLimitThreshold uint32) *LimitInfo { - return &LimitInfo{limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease, - nearLimitThreshold: nearLimitThreshold, overLimitThreshold: overLimitThreshold} + return &LimitInfo{ + limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease, + nearLimitThreshold: nearLimitThreshold, overLimitThreshold: overLimitThreshold, + } } // Generates cache keys for given rate limit request. Each cache key is represented by a concatenation of diff --git a/src/limiter/cache.go b/src/limiter/cache.go index 5ca07ede..eb914eb1 100644 --- a/src/limiter/cache.go +++ b/src/limiter/cache.go @@ -2,8 +2,9 @@ package limiter import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/config" "golang.org/x/net/context" + + "github.com/envoyproxy/ratelimit/src/config" ) // Interface for interacting with a cache backend for rate limiting. 
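Editorial note on the Go hunks in this patch: they are mechanical reorderings produced by the `go-imports` hook configured above with `-local github.com/envoyproxy/ratelimit`, which groups imports as standard library, then third-party, then local packages. A minimal sketch of the resulting shape (an illustrative file, not one from the repo):

```go
// Package example illustrates the import grouping that
// `goimports -local github.com/envoyproxy/ratelimit` enforces.
package example

import (
	// Standard library imports come first.
	"context"
	"fmt"

	// Third-party dependencies form the second group.
	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
	logger "github.com/sirupsen/logrus"

	// Packages under the -local prefix are grouped last.
	"github.com/envoyproxy/ratelimit/src/config"
)

// Describe uses one import from each group to keep the example compilable.
func Describe(ctx context.Context, limit *config.RateLimit, req *pb.RateLimitRequest) {
	logger.Debugf("ctx=%v limit=%v domain=%s", ctx, fmt.Sprint(limit), req.Domain)
}
```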
diff --git a/src/limiter/cache_key.go b/src/limiter/cache_key.go index 797cc2e1..4aeab204 100644 --- a/src/limiter/cache_key.go +++ b/src/limiter/cache_key.go @@ -7,6 +7,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" ) @@ -74,5 +75,6 @@ func (this *CacheKeyGenerator) GenerateCacheKey( return CacheKey{ Key: b.String(), - PerSecond: isPerSecondLimit(limit.Limit.Unit)} + PerSecond: isPerSecondLimit(limit.Limit.Unit), + } } diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 72bc216f..5ddbe664 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -17,12 +17,13 @@ package memcached import ( "context" - "github.com/envoyproxy/ratelimit/src/stats" "math/rand" "strconv" "sync" "time" + "github.com/envoyproxy/ratelimit/src/stats" + "github.com/coocood/freecache" gostats "github.com/lyft/gostats" diff --git a/src/metrics/metrics.go b/src/metrics/metrics.go index c1eb1109..fffd9dce 100644 --- a/src/metrics/metrics.go +++ b/src/metrics/metrics.go @@ -2,9 +2,10 @@ package metrics import ( "context" + "time" + stats "github.com/lyft/gostats" "google.golang.org/grpc" - "time" ) type serverMetrics struct { diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 9bec14bb..1c9417d7 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -4,6 +4,7 @@ import ( "math/rand" "github.com/coocood/freecache" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/server" "github.com/envoyproxy/ratelimit/src/settings" diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 6997e5dc..26c3902e 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -7,11 +7,12 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + logger "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/utils" - logger "github.com/sirupsen/logrus" - "golang.org/x/net/context" ) type fixedRateLimitCacheImpl struct { diff --git a/src/server/server_impl.go b/src/server/server_impl.go index c3f3dbb7..bfd66616 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -4,27 +4,24 @@ import ( "bytes" "expvar" "fmt" - "google.golang.org/grpc/keepalive" "io" + "net" "net/http" "net/http/pprof" + "os" + "os/signal" "path/filepath" "sort" "strconv" "sync" - - "github.com/envoyproxy/ratelimit/src/stats" - - "os" - "os/signal" "syscall" - "net" + "google.golang.org/grpc/keepalive" + + "github.com/envoyproxy/ratelimit/src/stats" "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/limiter" - "github.com/envoyproxy/ratelimit/src/settings" "github.com/golang/protobuf/jsonpb" "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" @@ -34,6 +31,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/settings" ) type serverDebugListener struct { @@ -215,7 +215,6 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, 
loc
            ret.store.ScopeWithTags("runtime", s.ExtraTags),
            &loader.SymlinkRefresher{RuntimePath: s.RuntimePath},
            loaderOpts...)
-
    } else {
        ret.runtime = loader.New(
            filepath.Join(s.RuntimePath, s.RuntimeSubdirectory),
diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go
index 67004ecf..6b4b2347 100644
--- a/src/service/ratelimit.go
+++ b/src/service/ratelimit.go
@@ -14,13 +14,14 @@ import (
     core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
 
+    "github.com/lyft/goruntime/loader"
+    logger "github.com/sirupsen/logrus"
+    "golang.org/x/net/context"
+
     "github.com/envoyproxy/ratelimit/src/assert"
     "github.com/envoyproxy/ratelimit/src/config"
     "github.com/envoyproxy/ratelimit/src/limiter"
     "github.com/envoyproxy/ratelimit/src/redis"
-    "github.com/lyft/goruntime/loader"
-    logger "github.com/sirupsen/logrus"
-    "golang.org/x/net/context"
 )
 
 type RateLimitServiceServer interface {
@@ -210,7 +211,6 @@ func (this *service) shouldRateLimitWorker(
 }
 
 func (this *service) rateLimitLimitHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue {
-
     // Limit header only provides the mandatory part from the spec, the actual limit
     // the optional quota policy is currently not provided
     return &core.HeaderValue{
@@ -220,7 +220,6 @@ func (this *service) rateLimitRemainingHeader(descriptor *pb.RateLimitResponse_Descr
-
     // How much of the limit is remaining
     return &core.HeaderValue{
         Key: this.customHeaderRemainingHeader,
diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go
index 54d8d570..8be85288 100644
--- a/src/service_cmd/runner/runner.go
+++ b/src/service_cmd/runner/runner.go
@@ -17,6 +17,8 @@ import (
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
 
+    logger "github.com/sirupsen/logrus"
+
     "github.com/envoyproxy/ratelimit/src/config"
     "github.com/envoyproxy/ratelimit/src/limiter"
     "github.com/envoyproxy/ratelimit/src/memcached"
@@ -25,7 +27,6 @@ import (
     ratelimit "github.com/envoyproxy/ratelimit/src/service"
     "github.com/envoyproxy/ratelimit/src/settings"
     "github.com/envoyproxy/ratelimit/src/utils"
-    logger "github.com/sirupsen/logrus"
 )
 
 type Runner struct {
diff --git a/src/srv/srv.go b/src/srv/srv.go
index 041ceb95..7262d1b4 100644
--- a/src/srv/srv.go
+++ b/src/srv/srv.go
@@ -23,14 +23,12 @@ func ParseSrv(srv string) (string, string, string, error) {
 
 func ServerStringsFromSrv(srv string) ([]string, error) {
     service, proto, name, err := ParseSrv(srv)
-
     if err != nil {
         logger.Errorf("failed to parse SRV: %s", err)
         return nil, err
     }
 
     _, srvs, err := net.LookupSRV(service, proto, name)
-
     if err != nil {
         logger.Errorf("failed to lookup SRV: %s", err)
         return nil, err
diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go
index 7748dde7..effad309 100644
--- a/src/stats/manager_impl.go
+++ b/src/stats/manager_impl.go
@@ -1,9 +1,10 @@
 package stats
 
 import (
-    "github.com/envoyproxy/ratelimit/src/settings"
     gostats "github.com/lyft/gostats"
     logger "github.com/sirupsen/logrus"
+
+    "github.com/envoyproxy/ratelimit/src/settings"
 )
 
 func NewStatManager(store gostats.Store, settings settings.Settings) *ManagerImpl {
diff --git a/test/common/common.go b/test/common/common.go
index 54216f1f..0fec1ae1 100644
--- a/test/common/common.go
+++ b/test/common/common.go
@@ -125,7 +125,6 @@ func startCacheProcess(ctx context.Context, command string, args []string, port
     }()
 
     err := cmd.Start()
-
     if err != nil {
         cancel()
         return nil, fmt.Errorf("Problem starting %s subprocess: %v", command, err)
     }
diff --git a/test/config/config_test.go b/test/config/config_test.go
index 3caaf2cc..d22bdcaf 100644
--- a/test/config/config_test.go
+++ b/test/config/config_test.go
@@ -9,10 +9,11 @@ import (
     pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
     pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3"
-    "github.com/envoyproxy/ratelimit/src/config"
-    mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats"
     stats "github.com/lyft/gostats"
     "github.com/stretchr/testify/assert"
+
+    "github.com/envoyproxy/ratelimit/src/config"
+    mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats"
 )
 
 func loadFile(path string) []config.RateLimitConfigToLoad {
@@ -350,7 +351,6 @@ func TestMisspelledKey(t *testing.T) {
         config.NewRateLimitConfigImpl(
             loadFile("misspelled_key2.yaml"),
             mockstats.NewMockStatManager(stats.NewStore(stats.NewNullSink(), false)))
-
     },
         "misspelled_key2.yaml: config error, unknown key 'requestsperunit'")
 }
diff --git a/test/config/duplicate_key.yaml b/test/config/duplicate_key.yaml
index daa6a3da..5551c1f2 100644
--- a/test/config/duplicate_key.yaml
+++ b/test/config/duplicate_key.yaml
@@ -4,4 +4,4 @@ descriptors:
     value: value1
 
   - key: key1
-    value: value1
\ No newline at end of file
+    value: value1
diff --git a/test/config/misspelled_key.yaml b/test/config/misspelled_key.yaml
index 2b2d0a07..2dea25b7 100644
--- a/test/config/misspelled_key.yaml
+++ b/test/config/misspelled_key.yaml
@@ -4,4 +4,4 @@ descriptors:
     value: value1
     ratelimit:
       unit: day
-      requests_per_unit: 5
\ No newline at end of file
+      requests_per_unit: 5
diff --git a/test/config/misspelled_key2.yaml b/test/config/misspelled_key2.yaml
index cbb7f504..f9ba3dce 100644
--- a/test/config/misspelled_key2.yaml
+++ b/test/config/misspelled_key2.yaml
@@ -4,4 +4,4 @@ descriptors:
     value: value1
     rate_limit:
       unit: day
-      requestsperunit: 5
\ No newline at end of file
+      requestsperunit: 5
diff --git a/test/config/non_map_list.yaml b/test/config/non_map_list.yaml
index 5a075903..c5a0da52 100644
--- a/test/config/non_map_list.yaml
+++ b/test/config/non_map_list.yaml
@@ -2,4 +2,4 @@ domain: test-domain
 descriptors:
   - a
   - b
-  - c
\ No newline at end of file
+  - c
diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go
index 2699b405..2d269eb5 100644
--- a/test/integration/integration_test.go
+++ b/test/integration/integration_test.go
@@ -12,15 +12,16 @@ import (
     "time"
 
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
-    "github.com/envoyproxy/ratelimit/src/memcached"
-    "github.com/envoyproxy/ratelimit/src/service_cmd/runner"
-    "github.com/envoyproxy/ratelimit/src/settings"
-    "github.com/envoyproxy/ratelimit/test/common"
     "github.com/golang/protobuf/ptypes/duration"
     "github.com/kelseyhightower/envconfig"
     "github.com/stretchr/testify/assert"
    "golang.org/x/net/context"
     "google.golang.org/grpc"
+
+    "github.com/envoyproxy/ratelimit/src/memcached"
+    "github.com/envoyproxy/ratelimit/src/service_cmd/runner"
+    "github.com/envoyproxy/ratelimit/src/settings"
+    "github.com/envoyproxy/ratelimit/test/common"
 )
 
 func init() {
@@ -52,7 +53,6 @@ func defaultSettings() settings.Settings {
 }
 
 func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32, durRemaining *duration.Duration) *pb.RateLimitResponse_DescriptorStatus {
-
     limit := &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}
 
     return &pb.RateLimitResponse_DescriptorStatus{
@@ -387,7 +387,8 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) {
             assert,
             &pb.RateLimitResponse{
                 OverallCode: pb.RateLimitResponse_OK,
-                Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}},
+                Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}},
+            },
             response)
         assert.NoError(err)
@@ -409,7 +410,9 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) {
             &pb.RateLimitResponse{
                 OverallCode: pb.RateLimitResponse_OK,
                 Statuses: []*pb.RateLimitResponse_DescriptorStatus{
-                    newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining)}},
+                    newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining),
+                },
+            },
             response)
         assert.NoError(err)
@@ -451,7 +454,9 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) {
             &pb.RateLimitResponse{
                 OverallCode: status,
                 Statuses: []*pb.RateLimitResponse_DescriptorStatus{
-                    newDescriptorStatus(status, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining, durRemaining)}},
+                    newDescriptorStatus(status, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining, durRemaining),
+                },
+            },
             response)
         assert.NoError(err)
 
         key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache)))
@@ -499,7 +504,8 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) {
                 "another",
                 [][][2]string{
                     {{getCacheKey("key2", enable_local_cache), strconv.Itoa(randomInt)}},
-                    {{getCacheKey("key3", enable_local_cache), strconv.Itoa(randomInt)}}}, 1))
+                    {{getCacheKey("key3", enable_local_cache), strconv.Itoa(randomInt)}},
+                }, 1))
 
             status := pb.RateLimitResponse_OK
             limitRemaining1 := uint32(20 - (i + 1))
@@ -516,7 +522,9 @@ func testBasicBaseConfig(s settings.Settings) func(*testing.T) {
                     OverallCode: status,
                     Statuses: []*pb.RateLimitResponse_DescriptorStatus{
                         newDescriptorStatus(pb.RateLimitResponse_OK, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining1, durRemaining1),
-                        newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2, durRemaining2)}},
+                        newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2, durRemaining2),
+                    },
+                },
                 response)
             assert.NoError(err)
@@ -630,7 +638,8 @@ func testConfigReload(s settings.Settings) func(*testing.T) {
             assert,
             &pb.RateLimitResponse{
                 OverallCode: pb.RateLimitResponse_OK,
-                Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK}}},
+                Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK}},
+            },
             response)
         assert.NoError(err)
@@ -688,7 +697,9 @@ func testConfigReload(s settings.Settings) func(*testing.T) {
             &pb.RateLimitResponse{
                 OverallCode: pb.RateLimitResponse_OK,
                 Statuses: []*pb.RateLimitResponse_DescriptorStatus{
-                    newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining)}},
+                    newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining),
+                },
+            },
             response)
         assert.NoError(err)
diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go
index 76e9df65..1a9a4d15 100644
--- a/test/limiter/base_limiter_test.go
+++ b/test/limiter/base_limiter_test.go
@@ -8,13 +8,14 @@ import (
     "github.com/coocood/freecache"
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
+    "github.com/golang/mock/gomock"
+    stats "github.com/lyft/gostats"
+    "github.com/stretchr/testify/assert"
+
     "github.com/envoyproxy/ratelimit/src/config"
     "github.com/envoyproxy/ratelimit/src/limiter"
     "github.com/envoyproxy/ratelimit/test/common"
     mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils"
-    "github.com/golang/mock/gomock"
-    stats "github.com/lyft/gostats"
-    "github.com/stretchr/testify/assert"
 )
 
 func TestGenerateCacheKeys(t *testing.T) {
@@ -206,7 +207,6 @@ func TestGetResponseStatusBelowLimit(t *testing.T) {
     assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value())
     // No shadow_mode so, no stats change
     assert.Equal(uint64(0), limits[0].Stats.ShadowMode.Value())
-
 }
 
 func TestGetResponseStatusBelowLimitShadowMode(t *testing.T) {
@@ -228,5 +228,4 @@ func TestGetResponseStatusBelowLimitShadowMode(t *testing.T) {
     assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value())
     // No shadow_mode so, no stats change
     assert.Equal(uint64(0), limits[0].Stats.ShadowMode.Value())
-
 }
diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go
index f29a6295..a4640148 100644
--- a/test/memcached/cache_impl_test.go
+++ b/test/memcached/cache_impl_test.go
@@ -15,18 +15,20 @@ import (
     "github.com/coocood/freecache"
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
+    stats "github.com/lyft/gostats"
+
     "github.com/envoyproxy/ratelimit/src/config"
     "github.com/envoyproxy/ratelimit/src/limiter"
     "github.com/envoyproxy/ratelimit/src/memcached"
     "github.com/envoyproxy/ratelimit/src/settings"
     "github.com/envoyproxy/ratelimit/src/utils"
-    stats "github.com/lyft/gostats"
+
+    "github.com/golang/mock/gomock"
+    "github.com/stretchr/testify/assert"
 
     "github.com/envoyproxy/ratelimit/test/common"
     mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached"
     mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils"
-    "github.com/golang/mock/gomock"
-    "github.com/stretchr/testify/assert"
 )
 
 func TestMemcached(t *testing.T) {
@@ -71,10 +73,13 @@ func TestMemcached(t *testing.T) {
     }, 1)
     limits = []*config.RateLimit{
         nil,
-        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false)}
+        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false),
+    }
     assert.Equal(
-        []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
-            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}},
+        []*pb.RateLimitResponse_DescriptorStatus{
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value())
     assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value())
@@ -88,7 +93,8 @@ func TestMemcached(t *testing.T) {
     }).Return(
         getMultiResult(map[string]int{
             "domain_key3_value3_997200": 10,
-            "domain_key3_value3_subkey3_subvalue3_950400": 12}),
+            "domain_key3_value3_subkey3_subvalue3_950400": 12,
+        }),
         nil,
     )
     client.EXPECT().Increment("domain_key3_value3_997200", uint64(1)).Return(uint64(11), nil)
@@ -102,11 +108,13 @@ func TestMemcached(t *testing.T) {
     }, 1)
     limits = []*config.RateLimit{
         config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false),
-        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false)}
+        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false),
+    }
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
             {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
-            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value())
@@ -229,11 +237,13 @@ func TestOverLimitWithLocalCache(t *testing.T) {
     request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
     limits := []*config.RateLimit{
-        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)}
+        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false),
+    }
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -253,7 +263,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -273,7 +284,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value())
@@ -290,7 +302,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
     client.EXPECT().Increment("domain_key4_value4_997200", uint64(1)).Times(0)
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value())
@@ -325,11 +338,13 @@ func TestNearLimit(t *testing.T) {
     request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
     limits := []*config.RateLimit{
-        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)}
+        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false),
+    }
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -345,7 +360,8 @@ func TestNearLimit(t *testing.T) {
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -362,7 +378,8 @@ func TestNearLimit(t *testing.T) {
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value())
diff --git a/test/memcached/stats_collecting_client_test.go b/test/memcached/stats_collecting_client_test.go
index 548b9304..d2e2b5cd 100644
--- a/test/memcached/stats_collecting_client_test.go
+++ b/test/memcached/stats_collecting_client_test.go
@@ -5,11 +5,12 @@ import (
     "testing"
 
     "github.com/bradfitz/gomemcache/memcache"
-    "github.com/envoyproxy/ratelimit/src/memcached"
-    mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached"
     "github.com/golang/mock/gomock"
     stats "github.com/lyft/gostats"
     "github.com/stretchr/testify/assert"
+
+    "github.com/envoyproxy/ratelimit/src/memcached"
+    mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached"
 )
 
 type fakeSink struct {
diff --git a/test/metrics/metrics_test.go b/test/metrics/metrics_test.go
index 04ffbd68..a3537742 100644
--- a/test/metrics/metrics_test.go
+++ b/test/metrics/metrics_test.go
@@ -5,11 +5,12 @@ import (
     "testing"
     "time"
 
-    "github.com/envoyproxy/ratelimit/src/metrics"
     stats "github.com/lyft/gostats"
     statsMock "github.com/lyft/gostats/mock"
     "github.com/stretchr/testify/assert"
     "google.golang.org/grpc"
+
+    "github.com/envoyproxy/ratelimit/src/metrics"
 )
 
 func TestMetricsInterceptor(t *testing.T) {
diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go
index b34328dc..7ac5220c 100644
--- a/test/mocks/config/config.go
+++ b/test/mocks/config/config.go
@@ -6,11 +6,13 @@ package mock_config
 
 import (
     context "context"
+    reflect "reflect"
+
     envoy_extensions_common_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
+    gomock "github.com/golang/mock/gomock"
+
     config "github.com/envoyproxy/ratelimit/src/config"
     stats "github.com/envoyproxy/ratelimit/src/stats"
-    gomock "github.com/golang/mock/gomock"
-    reflect "reflect"
 )
 
 // MockRateLimitConfig is a mock of RateLimitConfig interface
diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go
index 48f995a1..8e47f042 100644
--- a/test/mocks/limiter/limiter.go
+++ b/test/mocks/limiter/limiter.go
@@ -6,10 +6,12 @@ package mock_limiter
 
 import (
     context "context"
+    reflect "reflect"
+
     envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
-    config "github.com/envoyproxy/ratelimit/src/config"
     gomock "github.com/golang/mock/gomock"
-    reflect "reflect"
+
+    config "github.com/envoyproxy/ratelimit/src/config"
 )
 
 // MockRateLimitCache is a mock of RateLimitCache interface
diff --git a/test/mocks/memcached/client.go b/test/mocks/memcached/client.go
index 433105bd..b00abf1e 100644
--- a/test/mocks/memcached/client.go
+++ b/test/mocks/memcached/client.go
@@ -5,9 +5,10 @@
 package mock_memcached
 
 import (
+    reflect "reflect"
+
     memcache "github.com/bradfitz/gomemcache/memcache"
     gomock "github.com/golang/mock/gomock"
-    reflect "reflect"
 )
 
 // MockClient is a mock of Client interface
diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/redis.go
index 032b500d..2d6d059f 100644
--- a/test/mocks/redis/redis.go
+++ b/test/mocks/redis/redis.go
@@ -5,9 +5,11 @@
 package mock_redis
 
 import (
-    redis "github.com/envoyproxy/ratelimit/src/redis"
-    gomock "github.com/golang/mock/gomock"
     reflect "reflect"
+
+    gomock "github.com/golang/mock/gomock"
+
+    redis "github.com/envoyproxy/ratelimit/src/redis"
 )
 
 // MockClient is a mock of Client interface
diff --git a/test/mocks/rls/rls.go b/test/mocks/rls/rls.go
index 92d79b9a..57fdb2f7 100644
--- a/test/mocks/rls/rls.go
+++ b/test/mocks/rls/rls.go
@@ -6,9 +6,10 @@ package mock_v3
 
 import (
     context "context"
+    reflect "reflect"
+
     envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
     gomock "github.com/golang/mock/gomock"
-    reflect "reflect"
 )
 
 // MockRateLimitServiceServer is a mock of RateLimitServiceServer interface
diff --git a/test/mocks/runtime/loader/loader.go b/test/mocks/runtime/loader/loader.go
index da00c649..58f238bb 100644
--- a/test/mocks/runtime/loader/loader.go
+++ b/test/mocks/runtime/loader/loader.go
@@ -5,9 +5,10 @@
 package mock_loader
 
 import (
+    reflect "reflect"
+
     gomock "github.com/golang/mock/gomock"
     snapshot "github.com/lyft/goruntime/snapshot"
-    reflect "reflect"
 )
 
 // MockIFace is a mock of IFace interface
diff --git a/test/mocks/runtime/snapshot/snapshot.go b/test/mocks/runtime/snapshot/snapshot.go
index a56fe5a5..ae62e0c2 100644
--- a/test/mocks/runtime/snapshot/snapshot.go
+++ b/test/mocks/runtime/snapshot/snapshot.go
@@ -5,10 +5,11 @@
 package mock_snapshot
 
 import (
-    gomock "github.com/golang/mock/gomock"
-    entry "github.com/lyft/goruntime/snapshot/entry"
     reflect "reflect"
     time "time"
+
+    gomock "github.com/golang/mock/gomock"
+    entry "github.com/lyft/goruntime/snapshot/entry"
 )
 
 // MockIFace is a mock of IFace interface
diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go
index c0045b14..14850ac6 100644
--- a/test/mocks/stats/manager.go
+++ b/test/mocks/stats/manager.go
@@ -1,9 +1,10 @@
 package stats
 
 import (
-    "github.com/envoyproxy/ratelimit/src/stats"
     gostats "github.com/lyft/gostats"
     logger "github.com/sirupsen/logrus"
+
+    "github.com/envoyproxy/ratelimit/src/stats"
 )
 
 type MockStatManager struct {
diff --git a/test/mocks/utils/utils.go b/test/mocks/utils/utils.go
index 1812f4f0..44998db1 100644
--- a/test/mocks/utils/utils.go
+++ b/test/mocks/utils/utils.go
@@ -5,8 +5,9 @@
 package mock_utils
 
 import (
-    gomock "github.com/golang/mock/gomock"
     reflect "reflect"
+
+    gomock "github.com/golang/mock/gomock"
 )
 
 // MockTimeSource is a mock of TimeSource interface
diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go
index b5376e63..66f6ccbb 100644
--- a/test/redis/bench_test.go
+++ b/test/redis/bench_test.go
@@ -2,6 +2,7 @@ package redis_test
 
 import (
     "context"
+    "math/rand"
     "runtime"
     "testing"
     "time"
@@ -9,12 +10,11 @@ import (
     "github.com/envoyproxy/ratelimit/test/mocks/stats"
 
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
+    gostats "github.com/lyft/gostats"
+
     "github.com/envoyproxy/ratelimit/src/config"
     "github.com/envoyproxy/ratelimit/src/redis"
     "github.com/envoyproxy/ratelimit/src/utils"
-    gostats "github.com/lyft/gostats"
-
-    "math/rand"
 
     "github.com/envoyproxy/ratelimit/test/common"
 )
diff --git a/test/redis/driver_impl_test.go b/test/redis/driver_impl_test.go
index b4858da1..f549a754 100644
--- a/test/redis/driver_impl_test.go
+++ b/test/redis/driver_impl_test.go
@@ -5,9 +5,10 @@ import (
     "time"
 
     "github.com/alicebob/miniredis/v2"
-    "github.com/envoyproxy/ratelimit/src/redis"
     stats "github.com/lyft/gostats"
     "github.com/stretchr/testify/assert"
+
+    "github.com/envoyproxy/ratelimit/src/redis"
 )
 
 func mustNewRedisServer() *miniredis.Miniredis {
diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go
index b13cd946..e28da4b5 100644
--- a/test/redis/fixed_cache_impl_test.go
+++ b/test/redis/fixed_cache_impl_test.go
@@ -1,6 +1,7 @@
 package redis_test
 
 import (
+    "math/rand"
     "testing"
 
     "github.com/envoyproxy/ratelimit/test/mocks/stats"
@@ -9,19 +10,19 @@ import (
     "github.com/mediocregopher/radix/v3"
 
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
+    gostats "github.com/lyft/gostats"
+
     "github.com/envoyproxy/ratelimit/src/config"
     "github.com/envoyproxy/ratelimit/src/limiter"
     "github.com/envoyproxy/ratelimit/src/redis"
     "github.com/envoyproxy/ratelimit/src/utils"
-    gostats "github.com/lyft/gostats"
-    "math/rand"
+    "github.com/golang/mock/gomock"
+    "github.com/stretchr/testify/assert"
 
     "github.com/envoyproxy/ratelimit/test/common"
     mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis"
     mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils"
-    "github.com/golang/mock/gomock"
-    "github.com/stretchr/testify/assert"
 )
 
 func TestRedis(t *testing.T) {
@@ -89,10 +90,13 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
         }, 1)
         limits = []*config.RateLimit{
             nil,
-            config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false)}
+            config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false),
+        }
         assert.Equal(
-            []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
-                {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}},
+            []*pb.RateLimitResponse_DescriptorStatus{
+                {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
+                {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)},
+            },
             cache.DoLimit(nil, request, limits))
         assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value())
         assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value())
@@ -117,11 +121,13 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
         }, 1)
         limits = []*config.RateLimit{
             config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false),
-            config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false)}
+            config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false),
+        }
         assert.Equal(
             []*pb.RateLimitResponse_DescriptorStatus{
                 {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
-                {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}},
+                {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)},
+            },
             cache.DoLimit(nil, request, limits))
         assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value())
         assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value())
@@ -194,11 +200,13 @@ func TestOverLimitWithLocalCache(t *testing.T) {
     request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
     limits := []*config.RateLimit{
-        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)}
+        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false),
+    }
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -218,7 +226,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -238,7 +247,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value())
@@ -256,7 +266,8 @@ func TestOverLimitWithLocalCache(t *testing.T) {
         "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0)
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value())
@@ -289,11 +300,13 @@ func TestNearLimit(t *testing.T) {
     request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
     limits := []*config.RateLimit{
-        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false)}
+        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false),
+    }
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -309,7 +322,8 @@ func TestNearLimit(t *testing.T) {
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -326,7 +340,8 @@ func TestNearLimit(t *testing.T) {
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value())
@@ -491,11 +506,13 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) {
     request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1)
 
     limits := []*config.RateLimit{
-        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true)}
+        config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true),
+    }
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -515,7 +532,8 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) {
 
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value())
@@ -536,7 +554,8 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) {
     // The result should be OK since limit is in ShadowMode
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
     assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value())
     assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value())
@@ -556,7 +575,8 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) {
     // The result should be OK since limit is in ShadowMode
     assert.Equal(
         []*pb.RateLimitResponse_DescriptorStatus{
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)},
+        },
         cache.DoLimit(nil, request, limits))
 
     // TODO: How should we handle statistics? Should there be a separate ShadowMode statistics? Should the other Stats remain as if they were unaffected by shadowmode?
     assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value())
diff --git a/test/server/health_test.go b/test/server/health_test.go
index a79e3642..d9610507 100644
--- a/test/server/health_test.go
+++ b/test/server/health_test.go
@@ -8,10 +8,11 @@ import (
     "syscall"
     "testing"
 
-    "github.com/envoyproxy/ratelimit/src/server"
     "google.golang.org/grpc"
     "google.golang.org/grpc/health"
     healthpb "google.golang.org/grpc/health/grpc_health_v1"
+
+    "github.com/envoyproxy/ratelimit/src/server"
 )
 
 func TestHealthCheck(t *testing.T) {
@@ -42,7 +43,6 @@ func TestHealthCheck(t *testing.T) {
     if 500 != recorder.Code {
         t.Errorf("expected code 500 actual %d", recorder.Code)
     }
-
 }
 
 func TestGrpcHealthCheck(t *testing.T) {
diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go
index 8ee22161..19a8e59f 100644
--- a/test/server/server_impl_test.go
+++ b/test/server/server_impl_test.go
@@ -2,20 +2,22 @@ package server_test
 
 import (
     "fmt"
-    "github.com/golang/protobuf/proto"
-    "github.com/stretchr/testify/mock"
     "io/ioutil"
     "net/http"
     "net/http/httptest"
     "strings"
     "testing"
 
+    "github.com/golang/protobuf/proto"
+    "github.com/stretchr/testify/mock"
+
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
-    "github.com/envoyproxy/ratelimit/src/server"
-    mock_v3 "github.com/envoyproxy/ratelimit/test/mocks/rls"
     "github.com/golang/mock/gomock"
     "github.com/stretchr/testify/assert"
+
+    "github.com/envoyproxy/ratelimit/src/server"
+    mock_v3 "github.com/envoyproxy/ratelimit/test/mocks/rls"
 )
 
 func assertHttpResponse(t *testing.T,
diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go
index 77e5bf95..ead86ab6 100644
--- a/test/service/ratelimit_test.go
+++ b/test/service/ratelimit_test.go
@@ -12,6 +12,11 @@ import (
     core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
     pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
+    "github.com/golang/mock/gomock"
+    gostats "github.com/lyft/gostats"
+    "github.com/stretchr/testify/assert"
+    "golang.org/x/net/context"
+
     "github.com/envoyproxy/ratelimit/src/config"
     "github.com/envoyproxy/ratelimit/src/redis"
     ratelimit "github.com/envoyproxy/ratelimit/src/service"
@@ -21,10 +26,6 @@ import (
     mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader"
     mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot"
     mock_stats "github.com/envoyproxy/ratelimit/test/mocks/stats"
-    "github.com/golang/mock/gomock"
-    gostats "github.com/lyft/gostats"
-    "github.com/stretchr/testify/assert"
-    "golang.org/x/net/context"
 )
 
 type barrier struct {
@@ -118,7 +119,8 @@ func TestService(test *testing.T) {
         t.assert,
         &pb.RateLimitResponse{
             OverallCode: pb.RateLimitResponse_OK,
-            Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}},
+            Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}},
+        },
         response)
     t.assert.Nil(err)
@@ -135,12 +137,15 @@ func TestService(test *testing.T) {
         "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1)
     limits := []*config.RateLimit{
         config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false),
-        nil}
+        nil,
+    }
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1])
     t.cache.EXPECT().DoLimit(nil, request, limits).Return(
-        []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}})
+        []*pb.RateLimitResponse_DescriptorStatus{
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
+        })
     response, err = service.ShouldRateLimit(nil, request)
     common.AssertProtoEqual(
         t.assert,
@@ -149,7 +154,8 @@ func TestService(test *testing.T) {
             Statuses: []*pb.RateLimitResponse_DescriptorStatus{
                 {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
                 {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
-            }},
+            },
+        },
         response)
     t.assert.Nil(err)
@@ -166,12 +172,15 @@ func TestService(test *testing.T) {
     // Config should still be valid. Also make sure order does not affect results.
     limits = []*config.RateLimit{
         nil,
-        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false)}
+        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false),
+    }
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1])
     t.cache.EXPECT().DoLimit(nil, request, limits).Return(
-        []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
-            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}})
+        []*pb.RateLimitResponse_DescriptorStatus{
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0},
+        })
     response, err = service.ShouldRateLimit(nil, request)
     common.AssertProtoEqual(
         t.assert,
@@ -180,7 +189,8 @@ func TestService(test *testing.T) {
             Statuses: []*pb.RateLimitResponse_DescriptorStatus{
                 {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
                 {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0},
-            }},
+            },
+        },
         response)
     t.assert.Nil(err)
@@ -216,12 +226,15 @@ func TestServiceGlobalShadowMode(test *testing.T) {
     // Global Shadow mode
     limits := []*config.RateLimit{
         config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false),
-        nil}
+        nil,
+    }
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1])
     t.cache.EXPECT().DoLimit(nil, request, limits).Return(
-        []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}})
+        []*pb.RateLimitResponse_DescriptorStatus{
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
+        })
     response, err := service.ShouldRateLimit(nil, request)
 
     // OK overall code even if limit response was OVER_LIMIT
@@ -232,7 +245,8 @@ func TestServiceGlobalShadowMode(test *testing.T) {
             Statuses: []*pb.RateLimitResponse_DescriptorStatus{
                 {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
                 {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
-            }},
+            },
+        },
         response)
     t.assert.Nil(err)
@@ -252,12 +266,15 @@ func TestRuleShadowMode(test *testing.T) {
         "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1)
     limits := []*config.RateLimit{
         config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true),
-        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true)}
+        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true),
+    }
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1])
     t.cache.EXPECT().DoLimit(nil, request, limits).Return(
-        []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}})
+        []*pb.RateLimitResponse_DescriptorStatus{
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
+        })
     response, err := service.ShouldRateLimit(nil, request)
     t.assert.Equal(
         &pb.RateLimitResponse{
@@ -265,7 +282,8 @@ func TestRuleShadowMode(test *testing.T) {
             Statuses: []*pb.RateLimitResponse_DescriptorStatus{
                 {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
                 {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
-            }},
+            },
+        },
         response)
     t.assert.Nil(err)
@@ -281,7 +299,8 @@ func TestMixedRuleShadowMode(test *testing.T) {
         "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1)
     limits := []*config.RateLimit{
         config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true),
-        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false)}
+        config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false),
+    }
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1])
     testResults := []pb.RateLimitResponse_Code{pb.RateLimitResponse_OVER_LIMIT, pb.RateLimitResponse_OVER_LIMIT}
@@ -291,8 +310,10 @@ func TestMixedRuleShadowMode(test *testing.T) {
         }
     }
     t.cache.EXPECT().DoLimit(nil, request, limits).Return(
-        []*pb.RateLimitResponse_DescriptorStatus{{Code: testResults[0], CurrentLimit: limits[0].Limit, LimitRemaining: 0},
-            {Code: testResults[1], CurrentLimit: nil, LimitRemaining: 0}})
+        []*pb.RateLimitResponse_DescriptorStatus{
+            {Code: testResults[0], CurrentLimit: limits[0].Limit, LimitRemaining: 0},
+            {Code: testResults[1], CurrentLimit: nil, LimitRemaining: 0},
+        })
     response, err := service.ShouldRateLimit(nil, request)
     t.assert.Equal(
         &pb.RateLimitResponse{
@@ -300,7 +321,8 @@ func TestMixedRuleShadowMode(test *testing.T) {
             Statuses: []*pb.RateLimitResponse_DescriptorStatus{
                 {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
                 {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: nil, LimitRemaining: 0},
-            }},
+            },
+        },
         response)
     t.assert.Nil(err)
@@ -336,12 +358,15 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) {
         "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1)
     limits := []*config.RateLimit{
         config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false),
-        nil}
+        nil,
+    }
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1])
     t.cache.EXPECT().DoLimit(nil, request, limits).Return(
-        []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}})
+        []*pb.RateLimitResponse_DescriptorStatus{
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
+        })
 
     response, err := service.ShouldRateLimit(nil, request)
     common.AssertProtoEqual(
@@ -385,12 +410,15 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) {
         "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1)
     limits := []*config.RateLimit{
         config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false),
-        nil}
+        nil,
+    }
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0])
     t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[1]).Return(limits[1])
    t.cache.EXPECT().DoLimit(nil, request, limits).Return(
-        []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
-            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}})
+        []*pb.RateLimitResponse_DescriptorStatus{
+            {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
+            {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
+        })
 
     response, err := service.ShouldRateLimit(nil, request)
     common.AssertProtoEqual(
@@ -487,7 +515,8 @@ func TestUnlimited(test *testing.T) {
     limits := []*config.RateLimit{
         config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false, false),
         nil,
-        config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false)}
+        config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false),
+    }
     t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[0]).Return(limits[0])
     t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[1]).Return(limits[1])
     t.config.EXPECT().GetLimit(nil, "some-domain", request.Descriptors[2]).Return(limits[2])
@@ -510,7 +539,8 @@ func TestUnlimited(test *testing.T) {
             {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9},
             {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
             {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: math.MaxUint32},
-        }},
+        },
+    },
         response)
     t.assert.Nil(err)
 }
diff --git a/test/srv/srv_test.go b/test/srv/srv_test.go
index 5e3e8f79..55cb3113 100644
--- a/test/srv/srv_test.go
+++ b/test/srv/srv_test.go
@@ -5,8 +5,9 @@ import (
     "net"
     "testing"
 
-    "github.com/envoyproxy/ratelimit/src/srv"
     "github.com/stretchr/testify/assert"
+
+    "github.com/envoyproxy/ratelimit/src/srv"
 )
 
 func TestParseSrv(t *testing.T) {