diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 357928b84..fc8bf19f9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -26,23 +26,23 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Go - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 6d5965269..fa0cd727e 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -12,21 +12,21 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up QEMU - uses: 
docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@f211e3e9ded2d9377c8cadc4489a4e38014bc4c9 # v1.7.0 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: build and push docker image run: | @@ -41,15 +41,15 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: "3.9" - - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2.2.0 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: "1.21.5" + go-version: "1.23.9" - name: run pre-commits run: | diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 1289fad6f..12eedcef9 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: check format run: make check_format @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: build and test run: make docker_tests @@ -29,15 +29,15 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: 
actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: "3.9" - - uses: actions/setup-go@bfdd3570ce990073878bf10f6b2d79082de49492 # v2.2.0 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: - go-version: "1.21.5" + go-version: "1.23.9" - name: run pre-commits run: | diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index dc6ff10a8..8193b01d9 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,20 +12,20 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: check format run: make check_format build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up QEMU - uses: docker/setup-qemu-action@27d0a4f181a40b142cce983c5393082c365d1480 # v1.2.0 + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker buildx id: buildx - uses: docker/setup-buildx-action@f211e3e9ded2d9377c8cadc4489a4e38014bc4c9 # v1.7.0 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: build and push docker image run: | diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 20321ba2e..f053a9c89 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -29,12 +29,12 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: persist-credentials: false - name: "Run analysis" - uses: 
ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 with: results_file: results.sarif results_format: sarif @@ -56,7 +56,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@3cea5372237819ed00197afe530f5a7ea3e805c8 # v3.1.0 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: SARIF file path: results.sarif @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@17573ee1cc1b9d061760f3a006fc4aac4f944fd5 # v2.2.4 + uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: sarif_file: results.sarif diff --git a/.gitignore b/.gitignore index 37c261b44..558d1760e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ cover.out bin/ +env/ .idea/ .vscode/ vendor diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f23dc0426..961245085 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,7 @@ repos: - id: go-imports args: ["-w", "-local", "github.com/envoyproxy/ratelimit"] - id: go-fumpt - args: ["-w"] + args: ["-l", "-w", "."] - repo: https://github.com/pre-commit/mirrors-prettier rev: "v2.4.1" diff --git a/Dockerfile b/Dockerfile index 9e5c40ab6..cf0daf53e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5@sha256:672a2286da3ee7a854c3e0a56e0838918d0dbb1c18652992930293312de898a6 AS build +FROM golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c AS build WORKDIR /ratelimit ENV GOPROXY=https://proxy.golang.org @@ -10,6 +10,5 @@ COPY script script RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v 
github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.18.5@sha256:34871e7290500828b39e22294660bee86d966bc0017544e848dd9a255cdf59e0 AS final -RUN apk --no-cache add ca-certificates && apk --no-cache update +FROM gcr.io/distroless/static-debian12:nonroot@sha256:e8a4044e0b4ae4257efa45fc026c0bc30ad320d43bd4c1a7d5271bd241e386d0 COPY --from=build /go/bin/ratelimit /bin/ratelimit diff --git a/Dockerfile.integration b/Dockerfile.integration index 25cae665c..17a5b9f27 100644 --- a/Dockerfile.integration +++ b/Dockerfile.integration @@ -1,5 +1,5 @@ # Running this docker image runs the integration tests. -FROM golang@sha256:672a2286da3ee7a854c3e0a56e0838918d0dbb1c18652992930293312de898a6 +FROM golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c RUN apt-get update -y && apt-get install sudo stunnel4 redis memcached -y && rm -rf /var/lib/apt/lists/* diff --git a/Makefile b/Makefile index 2f98413d5..342e2beba 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,11 @@ BUILDX_PLATFORMS := linux/amd64,linux/arm64/v8 # Root dir returns absolute path of current directory. It has a trailing "/". 
PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) export PROJECT_DIR +ifneq ($(shell docker compose version 2>/dev/null),) + DOCKER_COMPOSE=docker compose +else + DOCKER_COMPOSE=docker-compose +endif .PHONY: bootstrap bootstrap: ; @@ -142,7 +147,7 @@ docker_multiarch_push: docker_multiarch_image .PHONY: integration_tests integration_tests: - docker-compose --project-directory $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester + $(DOCKER_COMPOSE) --project-directory $(PWD) -f integration-test/docker-compose-integration-test.yml up --build --exit-code-from tester .PHONY: precommit_install precommit_install: diff --git a/README.md b/README.md index 9dd39a7f5..f2d9291d3 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,9 @@ - [Overview](#overview) - [Docker Image](#docker-image) + - [Distroless Base Image](#distroless-base-image) + - [Benefits of Distroless:](#benefits-of-distroless) + - [Debugging with Distroless:](#debugging-with-distroless) - [Supported Envoy APIs](#supported-envoy-apis) - [API Deprecation History](#api-deprecation-history) - [Building and Testing](#building-and-testing) @@ -18,6 +21,8 @@ - [Replaces](#replaces) - [ShadowMode](#shadowmode) - [Including detailed metrics for unspecified values](#including-detailed-metrics-for-unspecified-values) + - [Including descriptor values in metrics](#including-descriptor-values-in-metrics) + - [Sharing thresholds for wildcard matches](#sharing-thresholds-for-wildcard-matches) - [Examples](#examples) - [Example 1](#example-1) - [Example 2](#example-2) @@ -28,6 +33,8 @@ - [Example 7](#example-7) - [Example 8](#example-8) - [Example 9](#example-9) + - [Example 10](#example-10) + - [Example 11](#example-11) - [Loading Configuration](#loading-configuration) - [File Based Configuration Loading](#file-based-configuration-loading) - [xDS Management Server Based Configuration Loading](#xds-management-server-based-configuration-loading) @@ -35,6 +42,7 @@ - [GRPC 
Keepalive](#grpc-keepalive) - [Health-check](#health-check) - [Health-check configurations](#health-check-configurations) + - [GRPC server](#grpc-server) - [Request Fields](#request-fields) - [GRPC Client](#grpc-client) - [Commandline flags](#commandline-flags) @@ -43,19 +51,28 @@ - [Statistics](#statistics) - [Statistics](#statistics-1) - [Statistics options](#statistics-options) + - [DogStatsD](#dogstatsd) + - [Example](#example) + - [Continued example:](#continued-example) + - [Prometheus](#prometheus) - [HTTP Port](#http-port) - [/json endpoint](#json-endpoint) - [Debug Port](#debug-port) - [Local Cache](#local-cache) - [Redis](#redis) - [Redis type](#redis-type) - - [Pipelining](#pipelining) + - [Connection Pool Settings](#connection-pool-settings) + - [Pool Size](#pool-size) + - [Connection Timeout](#connection-timeout) + - [Pool On-Empty Behavior](#pool-on-empty-behavior) + - [Pipelining](#pipelining) - [One Redis Instance](#one-redis-instance) - [Two Redis Instances](#two-redis-instances) - [Health Checking for Redis Active Connection](#health-checking-for-redis-active-connection) - [Memcache](#memcache) - [Custom headers](#custom-headers) - [Tracing](#tracing) +- [TLS](#tls) - [mTLS](#mtls) - [Contact](#contact) @@ -74,6 +91,32 @@ decision is then returned to the caller. For every main commit, an image is pushed to [Dockerhub](https://hub.docker.com/r/envoyproxy/ratelimit/tags?page=1&ordering=last_updated). There is currently no versioning (post v1.4.0) and tags are based on commit sha. +## Distroless Base Image + +The Docker image uses Google's [distroless](https://github.com/GoogleContainerTools/distroless) base image (`gcr.io/distroless/static-debian12:nonroot`) for enhanced security and minimal attack surface. Distroless images contain only the application and its runtime dependencies, omitting unnecessary OS components like package managers, shells, and other utilities. 
+ +The image is pinned to a specific SHA digest for deterministic builds and uses the `nonroot` variant to run as a non-privileged user, following security best practices. + +### Benefits of Distroless: + +- **Enhanced Security**: Minimal attack surface with no unnecessary components +- **Smaller Image Size**: Significantly smaller than traditional base images +- **Reduced Vulnerabilities**: Fewer components means fewer potential security issues +- **Better Compliance**: Meets security requirements for minimal base images +- **Non-root Execution**: Runs as a non-privileged user (UID 65532) for enhanced security +- **Deterministic Builds**: Pinned to specific SHA digest ensures reproducible builds + +### Debugging with Distroless: + +For debugging purposes, you can use the debug variant of the distroless image: + +```dockerfile +FROM gcr.io/distroless/static-debian12:debug +COPY --from=build /go/bin/ratelimit /bin/ratelimit +``` + +This provides shell access and debugging tools while maintaining the security benefits of distroless. + # Supported Envoy APIs [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) is currently supported. @@ -126,14 +169,13 @@ Support for [v2 rls proto](https://github.com/envoyproxy/data-plane-api/blob/mas ## Docker-compose setup -The docker-compose setup has three containers: redis, ratelimit-build, and ratelimit. In order to run the docker-compose setup from the root of the repo, run +The docker-compose setup uses a distroless-based container for the ratelimit service. In order to run the docker-compose setup from the root of the repo, run ```bash docker-compose up ``` -The ratelimit-build container will build the ratelimit binary. Then via a shared volume the binary will be shared with the ratelimit container. This dual container setup is used in order to use a -a minimal container to run the application, rather than the heftier container used to build it. 
+The ratelimit service is built using the main Dockerfile which uses Google's distroless base image for enhanced security and minimal attack surface. The distroless image contains only the application and its runtime dependencies, omitting unnecessary OS components like package managers and shells. If you want to run with [two redis instances](#two-redis-instances), you will need to modify the docker-compose.yml file to run a second redis container, and change the environment variables @@ -247,6 +289,8 @@ descriptors: requests_per_unit: shadow_mode: (optional) detailed_metric: (optional) + value_to_metric: (optional) + share_threshold: (optional) descriptors: (optional block) - ... (nested repetition of above) ``` @@ -301,6 +345,28 @@ Setting the `detailed_metric: true` for a descriptor will extend the metrics tha NB! This should only be enabled in situations where the potentially large cardinality of metrics that this can lead to is acceptable. +### Including descriptor values in metrics + +Setting `value_to_metric: true` (default: `false`) for a descriptor will include the descriptor's runtime value in the metric key, even when the descriptor value is not explicitly defined in the configuration. This allows you to track metrics per descriptor value when the value comes from the runtime request, providing visibility into different rate limit scenarios without needing to pre-define every possible value. + +**Note:** If a value is explicitly specified in a descriptor (e.g., `value: "GET"`), that value is always included in the metric key regardless of the `value_to_metric` setting. The `value_to_metric` flag only affects descriptors where the value is not explicitly defined in the configuration. + +When combined with wildcard matching, the full runtime value is included in the metric key, not just the wildcard prefix. This feature works independently of `detailed_metric` - when `detailed_metric` is set, it takes precedence and `value_to_metric` is ignored. 
+ +### Sharing thresholds for wildcard matches + +Setting `share_threshold: true` (default: `false`) for a descriptor with a wildcard value (ending with `*`) allows all values matching that wildcard to share the same rate limit threshold, instead of using isolated thresholds for each matching value. + +This is useful when you want to apply a single rate limit across multiple resources that match a wildcard pattern. For example, if you have a rule for `files/*`, both `files/a.pdf` and `files/b.csv` will share the same threshold when `share_threshold: true` is set. + +**Important notes:** + +- `share_threshold` can only be used with wildcard values (values ending with `*`) +- When `share_threshold: true` is enabled, all matching values share the same cache key and rate limit counter +- When `share_threshold: false` (or not set), each matching value has its own isolated threshold +- When combined with `value_to_metric: true`, the metric key includes the wildcard prefix (the part before `*`) instead of the full runtime value, to reflect that values are sharing a threshold +- When combined with `detailed_metric: true`, the metric key also includes the wildcard prefix for entries with `share_threshold` enabled + ### Examples #### Example 1 @@ -594,6 +660,110 @@ descriptors: requests_per_unit: 20 ``` +#### Example 10 + +Using `value_to_metric: true` to include descriptor values in metrics when values are not explicitly defined in the configuration: + +```yaml +domain: example10 +descriptors: + - key: route + value_to_metric: true + descriptors: + - key: http_method + value_to_metric: true + descriptors: + - key: subject_id + rate_limit: + unit: minute + requests_per_unit: 60 +``` + +With this configuration, requests with different runtime values for `route` and `http_method` will generate separate metrics: + +- Request: `route=api`, `http_method=GET`, `subject_id=123` +- Metric key: `example10.route_api.http_method_GET.subject_id` + +- Request: `route=web`, 
`http_method=POST`, `subject_id=456` +- Metric key: `example10.route_web.http_method_POST.subject_id` + +Without `value_to_metric: true`, both requests would use the same metric key: `example10.route.http_method.subject_id`. + +When combined with wildcard matching, the full runtime value is included: + +```yaml +domain: example10_wildcard +descriptors: + - key: user + value_to_metric: true + descriptors: + - key: action + value: read* + value_to_metric: true + descriptors: + - key: resource + rate_limit: + unit: minute + requests_per_unit: 100 +``` + +- Request: `user=alice`, `action=readfile`, `resource=documents` +- Metric key: `example10_wildcard.user_alice.action_readfile.resource` + +Note: When `detailed_metric: true` is set on a descriptor, it takes precedence and `value_to_metric` is ignored for that descriptor. + +#### Example 11 + +Using `share_threshold: true` to share rate limits across wildcard matches: + +```yaml +domain: example11 +descriptors: + # With share_threshold: true, all files/* matches share the same threshold + - key: files + value: files/* + share_threshold: true + rate_limit: + unit: hour + requests_per_unit: 10 + + # Without share_threshold, each files_no_share/* match has its own isolated threshold + - key: files_no_share + value: files_no_share/* + share_threshold: false + rate_limit: + unit: hour + requests_per_unit: 10 +``` + +With this configuration: + +- Requests for `files/a.pdf`, `files/b.csv`, and `files/c.txt` all share the same threshold of 10 requests per hour +- If 5 requests are made for `files/a.pdf` and 5 requests for `files/b.csv`, a request for `files/c.txt` will be rate limited (OVER_LIMIT) because the shared threshold of 10 has been reached +- Requests for `files_no_share/a.pdf` and `files_no_share/b.csv` each have their own isolated threshold of 10 requests per hour +- If 10 requests are made for `files_no_share/a.pdf` (exhausting its quota), requests for `files_no_share/b.csv` will still be allowed (up to 10 
requests) + +Combining `share_threshold` with `value_to_metric`: + +```yaml +domain: example11_metrics +descriptors: + - key: route + value: api/* + share_threshold: true + value_to_metric: true + descriptors: + - key: method + rate_limit: + unit: minute + requests_per_unit: 60 +``` + +- Request: `route=api/v1`, `method=GET` +- Metric key: `example11_metrics.route_api.method_GET` (includes the wildcard prefix `api` instead of the full value `api/v1`) + +This reflects that all `api/*` routes share the same threshold, while still providing visibility into which API routes are being accessed. + ## Loading Configuration Rate limit service supports following configuration loading methods. You can define which methods to use by configuring environment variable `CONFIG_TYPE`. @@ -647,7 +817,19 @@ To enable this behavior set `MERGE_DOMAIN_CONFIG` to `true`. xDS Management Server is a gRPC server which implements the [Aggregated Discovery Service (ADS)](https://github.com/envoyproxy/data-plane-api/blob/97b6dae39046f7da1331a4dc57830d20e842fc26/envoy/service/discovery/v3/ads.proto). The xDS Management server serves [Discovery Response](https://github.com/envoyproxy/data-plane-api/blob/97b6dae39046f7da1331a4dc57830d20e842fc26/envoy/service/discovery/v3/discovery.proto#L69) with [Ratelimit Configuration Resources](api/ratelimit/config/ratelimit/v3/rls_conf.proto) and with Type URL `"type.googleapis.com/ratelimit.config.ratelimit.v3.RateLimitConfig"`. + The xDS client in the Rate limit service configure Rate limit service with the provided configuration. +In case of connection failures, the xDS Client retries the connection to the xDS server with exponential backoff and the backoff parameters are configurable. + +1. `XDS_CLIENT_BACKOFF_JITTER`: set to `"true"` to add jitter to the exponential backoff. +2. `XDS_CLIENT_BACKOFF_INITIAL_INTERVAL`: The base amount of time the xDS client waits before retrying the connection after failure. Default: "10s" +3. 
`XDS_CLIENT_BACKOFF_MAX_INTERVAL`: The max backoff interval is the upper limit on the amount of time the xDS client will wait between retries. After reaching the max backoff interval, the next retries will continue using the max interval. Default: "60s" +4. `XDS_CLIENT_BACKOFF_RANDOM_FACTOR`: This is a factor by which the initial interval is multiplied to calculate the next backoff interval. Default: "0.5" + +The followings are the gRPC connection options. + +1. `XDS_CLIENT_MAX_MSG_SIZE_IN_BYTES`: The maximum message size in bytes that the xDS client can receive. + For more information on xDS protocol please refer to the [envoy proxy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol). You can refer to [the sample xDS configuration management server](examples/xds-sotw-config-server/README.md). @@ -740,6 +922,13 @@ HEALTHY_WITH_AT_LEAST_ONE_CONFIG_LOADED default:"false"` If `HEALTHY_WITH_AT_LEAST_ONE_CONFIG_LOADED` is enabled then health check will start as unhealthy and becomes healthy if it detects at least one domain is loaded with the config. If it detects no config again then it will change to unhealthy. +## GRPC server + +By default the ratelimit gRPC server binds to `0.0.0.0:8081`. To change this set +`GRPC_HOST` and/or `GRPC_PORT`. If you want to run the server on a unix domain +socket then set `GRPC_UDS`, e.g. `GRPC_UDS=//ratelimit.sock` and leave +`GRPC_HOST` and `GRPC_PORT` unmodified. + # Request Fields For information on the fields of a Ratelimit gRPC request please read the information @@ -780,6 +969,14 @@ The rate limit service generates various statistics for each configured rate lim users both for visibility and for setting alarms. Ratelimit uses [gostats](https://github.com/lyft/gostats) as its statistics library. Please refer to [gostats' documentation](https://godoc.org/github.com/lyft/gostats) for more information on the library. 
+Statistics default to using [StatsD](https://github.com/statsd/statsd) and configured via the env vars from [gostats](https://github.com/lyft/gostats). + +To output statistics to stdout instead, set env var `USE_STATSD` to `false` + +Configure statistics output frequency with `STATS_FLUSH_INTERVAL`, where the type is `time.Duration`, e.g. `10s` is the default value. + +To disable statistics entirely, set env var `DISABLE_STATS` to `true` + Rate Limit Statistic Path: ``` @@ -822,6 +1019,160 @@ ratelimit.service.rate_limit.messaging.auth-service.over_limit.shadow_mode: 1 1. `EXTRA_TAGS`: set to `","` to tag all emitted stats with the provided tags. You might want to tag build commit or release version, for example. +## DogStatsD + +To enable dogstatsd integration set: + +1. `USE_DOG_STATSD`: `true` to use [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go) + +dogstatsd also enables so called `mogrifiers` which can +convert from traditional stats tags into a combination of stat name and tags. + +To enable mogrifiers, set a comma-separated list of them in `DOG_STATSD_MOGRIFIERS`. + +e.g. `USE_DOG_STATSD_MOGRIFIERS`: `FOO,BAR` + +For each mogrifier, define variables that declare the mogrification + +1. `DOG_STATSD_MOGRIFIERS_%s_PATTERN`: The regex pattern to match on +2. `DOG_STATSD_MOGRIFIERS_%s_NAME`: The name of the metric to emit. Can contain variables. +3. `DOG_STATSD_MOGRIFIERS_%s_TAGS`: Comma-separated list of tags to emit. Can contain variables. + +Variables within mogrifiers are strings such as `$1`, `$2`, `$3` which can be used to reference +a match group from the regex pattern. + +### Example + +In the example below we will set mogrifier DOMAIN to adjust +`some.original.metric.TAG` to `some.original.metric` with tag `domain:TAG` + +First enable a single mogrifier: + +1. `USE_DOG_STATSD_MOGRIFIERS`: `DOMAIN` + +Then, declare the rules for the `DOMAIN` modifier: + +1. 
`DOG_STATSD_MOGRIFIER_DOMAIN_PATTERN`: `^some\.original\.metric\.(.*)$` +2. `DOG_STATSD_MOGRIFIER_DOMAIN_NAME`: `some.original.metric` +3. `DOG_STATSD_MOGRIFIER_DOMAIN_TAGS`: `domain:$1` + +### Continued example: + +Let's also set another mogrifier which outputs the hits metrics with a domain and descriptor tag + +First, enable an extra mogrifier: + +1. `USE_DOG_STATSD_MOGRIFIERS`: `DOMAIN,HITS` + +Then, declare additional rules for the `DESCRIPTOR` mogrifier + +1. `DOG_STATSD_MOGRIFIER_HITS_PATTERN`: `^ratelimit\.service\.rate_limit\.([^.]+)\.(.*)\.([^.]+)$` +2. `DOG_STATSD_MOGRIFIER_HITS_NAME`: `ratelimit.service.rate_limit.$3` +3. `DOG_STATSD_MOGRIFIER_HITS_TAGS`: `domain:$1,descriptor:$2` + +## Prometheus + +To enable Prometheus integration set: + +1. `USE_PROMETHEUS`: `true` to use [Prometheus](https://prometheus.io/) +2. `PROMETHEUS_ADDR`: The port to listen on for Prometheus metrics. Defaults to `:9090` +3. `PROMETHEUS_PATH`: The path to listen on for Prometheus metrics. Defaults to `/metrics` +4. `PROMETHEUS_MAPPER_YAML`: The path to the YAML file that defines the mapping from statsd to prometheus metrics. + +Define the mapping from statsd to prometheus metrics in a YAML file. +Find more information about the mapping in the [Metric Mapping and Configuration](https://github.com/prometheus/statsd_exporter?tab=readme-ov-file#metric-mapping-and-configuration). +The default setting is: + +```yaml +mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action. 
+ - match: "ratelimit.service.rate_limit.*.*.near_limit" + name: "ratelimit_service_rate_limit_near_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.over_limit" + name: "ratelimit_service_rate_limit_over_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.total_hits" + name: "ratelimit_service_rate_limit_total_hits" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.within_limit" + name: "ratelimit_service_rate_limit_within_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + + - match: "ratelimit.service.rate_limit.*.*.*.near_limit" + name: "ratelimit_service_rate_limit_near_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit.service.rate_limit.*.*.*.over_limit" + name: "ratelimit_service_rate_limit_over_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit.service.rate_limit.*.*.*.total_hits" + name: "ratelimit_service_rate_limit_total_hits" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit.service.rate_limit.*.*.*.within_limit" + name: "ratelimit_service_rate_limit_within_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + + - match: "ratelimit.service.call.should_rate_limit.*" + name: "ratelimit_service_should_rate_limit_error" + match_metric_type: counter + labels: + err_type: "$1" + + - match: "ratelimit_server.*.total_requests" + name: "ratelimit_service_total_requests" + match_metric_type: counter + labels: + grpc_method: "$1" + + - match: "ratelimit_server.*.response_time" + name: "ratelimit_service_response_time_seconds" + timer_type: histogram + labels: + grpc_method: "$1" + + - match: "ratelimit.service.config_load_success" + name: 
"ratelimit_service_config_load_success" + match_metric_type: counter + - match: "ratelimit.service.config_load_error" + name: "ratelimit_service_config_load_error" + match_metric_type: counter + + - match: "ratelimit.service.rate_limit.*.*.*.shadow_mode" + name: "ratelimit_service_rate_limit_shadow_mode" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" +``` + # HTTP Port The ratelimit service listens to HTTP 1.1 (by default on port 8080) with two endpoints: @@ -902,8 +1253,9 @@ As well Ratelimit supports TLS connections and authentication. These can be conf 1. `REDIS_TLS` & `REDIS_PERSECOND_TLS`: set to `"true"` to enable a TLS connection for the specific connection type. 1. `REDIS_TLS_CLIENT_CERT`, `REDIS_TLS_CLIENT_KEY`, and `REDIS_TLS_CACERT` to provides files to specify a TLS connection configuration to Redis server that requires client certificate verification. (This is effective when `REDIS_TLS` or `REDIS_PERSECOND_TLS` is set to to `"true"`). 1. `REDIS_TLS_SKIP_HOSTNAME_VERIFICATION` set to `"true"` will skip hostname verification in environments where the certificate has an invalid hostname, such as GCP Memorystore. -1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable password-only authentication to the redis host. -1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"username:password"` to enable username-password authentication to the redis host. +1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable password-only authentication to the Redis master/replica nodes. +1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"username:password"` to enable username-password authentication to the Redis master/replica nodes. +1. `REDIS_SENTINEL_AUTH` & `REDIS_PERSECOND_SENTINEL_AUTH`: set to `"password"` or `"username:password"` to enable authentication to Redis Sentinel nodes. This is separate from `REDIS_AUTH`/`REDIS_PERSECOND_AUTH` which authenticate to the Redis master/replica nodes. 
Only used when `REDIS_TYPE` or `REDIS_PERSECOND_TYPE` is set to `"sentinel"`. If not set, no authentication will be attempted when connecting to Sentinel nodes. 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys For controlling the behavior of cache key incrementation when any of them is already over the limit, you can use the following configuration: @@ -926,18 +1278,42 @@ The deployment type can be specified with the `REDIS_TYPE` / `REDIS_PERSECOND_TY 1. "sentinel": A comma separated list with the first string as the master name of the sentinel cluster followed by hostname:port pairs. The list size should be >= 2. The first item is the name of the master and the rest are the sentinels. 1. "cluster": A comma separated list of hostname:port pairs with all the nodes in the cluster. -## Pipelining +## Connection Pool Settings + +### Pool Size + +1. `REDIS_POOL_SIZE`: the number of connections to keep in the pool. Default: `10` +1. `REDIS_PERSECOND_POOL_SIZE`: pool size for per-second Redis. Default: `10` + +### Connection Timeout + +Controls the maximum duration for Redis connection establishment, read operations, and write operations. + +1. `REDIS_TIMEOUT`: sets the timeout for Redis connection and I/O operations. Default: `10s` +1. `REDIS_PERSECOND_TIMEOUT`: sets the timeout for per-second Redis connection and I/O operations. Default: `10s` + +### Pool On-Empty Behavior + +Controls what happens when all connections in the pool are in use and a new request arrives. + +1. `REDIS_POOL_ON_EMPTY_BEHAVIOR`: controls what happens when the pool is empty. Default: `CREATE` + - `CREATE`: create a new overflow connection after waiting for `REDIS_POOL_ON_EMPTY_WAIT_DURATION`. This is the [default radix behavior](https://github.com/mediocregopher/radix/blob/v3.8.1/pool.go#L291-L312). + - `ERROR`: return an error after waiting for `REDIS_POOL_ON_EMPTY_WAIT_DURATION`. This enforces a strict pool size limit. + - `WAIT`: block until a connection becomes available. 
This enforces a strict pool size limit but may cause goroutine buildup. +1. `REDIS_POOL_ON_EMPTY_WAIT_DURATION`: the duration to wait before taking the configured action (`CREATE` or `ERROR`). Default: `1s` +1. `REDIS_PERSECOND_POOL_ON_EMPTY_BEHAVIOR`: same as above for per-second Redis pool. Default: `CREATE` +1. `REDIS_PERSECOND_POOL_ON_EMPTY_WAIT_DURATION`: same as above for per-second Redis pool. Default: `1s` + +### Pipelining By default, for each request, ratelimit will pick up a connection from pool, write multiple redis commands in a single write then reads their responses in a single read. This reduces network delay. -For high throughput scenarios, ratelimit also support [implicit pipelining](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L238) . It can be configured using the following environment variables: +For high throughput scenarios, ratelimit supports write buffering via [radix v4's WriteFlushInterval](https://pkg.go.dev/github.com/mediocregopher/radix/v4#Dialer). It can be configured using the following environment variables: -1. `REDIS_PIPELINE_WINDOW` & `REDIS_PERSECOND_PIPELINE_WINDOW`: sets the duration after which internal pipelines will be flushed. - If window is zero then implicit pipelining will be disabled. -1. `REDIS_PIPELINE_LIMIT` & `REDIS_PERSECOND_PIPELINE_LIMIT`: sets maximum number of commands that can be pipelined before flushing. - If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. +1. `REDIS_PIPELINE_WINDOW` & `REDIS_PERSECOND_PIPELINE_WINDOW`: controls how often buffered writes are flushed to the network connection. When set to a non-zero value (e.g., 150us-500us), radix v4 will buffer multiple concurrent write operations and flush them together, reducing system calls and improving throughput. If zero, each write is flushed immediately. **Required for Redis Cluster mode.** +1. 
`REDIS_PIPELINE_LIMIT` & `REDIS_PERSECOND_PIPELINE_LIMIT`: **DEPRECATED** - These settings have no effect in radix v4. Write buffering is controlled solely by the window settings above. -`implicit pipelining` is disabled by default. To enable it, you can use default values [used by radix](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L278) and tune for the optimal value. +Write buffering is disabled by default (window = 0). For optimal performance, set `REDIS_PIPELINE_WINDOW` to 150us-500us depending on your latency requirements and load patterns. ## One Redis Instance @@ -984,6 +1360,9 @@ To configure a Memcache instance use the following environment variables instead 1. `BACKEND_TYPE=memcache` 1. `CACHE_KEY_PREFIX`: a string to prepend to all cache keys 1. `MEMCACHE_MAX_IDLE_CONNS=2`: the maximum number of idle TCP connections per memcache node, `2` is the default of the underlying library +1. `MEMCACHE_TLS`: set to `"true"` to connect to the server with TLS. +1. `MEMCACHE_TLS_CLIENT_CERT`, `MEMCACHE_TLS_CLIENT_KEY`, and `MEMCACHE_TLS_CACERT` to provide files that parameterize the memcache client TLS connection configuration. +1. `MEMCACHE_TLS_SKIP_HOSTNAME_VERIFICATION` set to `"true"` will skip hostname verification in environments where the certificate has an invalid hostname. With memcache mode increments will happen asynchronously, so it's technically possible for a client to exceed quota briefly if multiple requests happen at exactly the same time. @@ -1028,17 +1407,26 @@ otelcol-contrib --config examples/otlp-collector/config.yaml docker run -d --name jaeger -p 16686:16686 -p 14250:14250 jaegertracing/all-in-one:1.33 ``` +# TLS + +Ratelimit supports TLS for it's gRPC endpoint. + +The following environment variables control the TLS feature: + +1. `GRPC_SERVER_USE_TLS` - Enables gRPC connections to server over TLS +1. `GRPC_SERVER_TLS_CERT` - Path to the file containing the server cert chain +1. 
`GRPC_SERVER_TLS_KEY` - Path to the file containing the server private key + +Ratelimit uses [goruntime](https://github.com/lyft/goruntime) to watch the TLS certificate and key and will hot reload them on changes. + # mTLS Ratelimit supports mTLS when Envoy sends requests to the service. -The following environment variables control the mTLS feature: +TLS must be enabled on the gRPC endpoint in order for mTLS to work see [TLS](#TLS). The following variables can be set to enable mTLS on the Ratelimit service. -1. `GRPC_SERVER_USE_TLS` - Enables gprc connections to server over TLS -1. `GRPC_SERVER_TLS_CERT` - Path to the file containing the server cert chain -1. `GRPC_SERVER_TLS_KEY` - Path to the file containing the server private key 1. `GRPC_CLIENT_TLS_CACERT` - Path to the file containing the client CA certificate. 1. `GRPC_CLIENT_TLS_SAN` - (Optional) DNS Name to validate from the client cert during mTLS auth diff --git a/api/ratelimit/config/ratelimit/v3/rls_conf.proto b/api/ratelimit/config/ratelimit/v3/rls_conf.proto index cdb1836fd..31617838c 100644 --- a/api/ratelimit/config/ratelimit/v3/rls_conf.proto +++ b/api/ratelimit/config/ratelimit/v3/rls_conf.proto @@ -42,6 +42,9 @@ message RateLimitDescriptor { // Mark the descriptor as shadow. When the values is true, rate limit service allow requests to the backend. bool shadow_mode = 5; + + // Setting the `detailed_metric: true` for a descriptor will extend the metrics that are produced. + bool detailed_metric = 6; } // Rate-limit policy. @@ -88,17 +91,13 @@ enum RateLimitUnit { // The time unit representing a day. DAY = 4; -} -// [#protodoc-title: Rate Limit Config Discovery Service (RLS Conf DS)] + // The time unit representing a week. + WEEK = 7; -// Return list of all rate limit configs that rate limit service should be configured with. 
-service RateLimitConfigDiscoveryService { - rpc StreamRlsConfigs(stream envoy.service.discovery.v3.DiscoveryRequest) - returns (stream envoy.service.discovery.v3.DiscoveryResponse) { - } + // The time unit representing a month. + MONTH = 5; - rpc FetchRlsConfigs(envoy.service.discovery.v3.DiscoveryRequest) - returns (envoy.service.discovery.v3.DiscoveryResponse) { - } + // The time unit representing a year. + YEAR = 6; } diff --git a/api/ratelimit/service/ratelimit/v3/rls_conf_ds.proto b/api/ratelimit/service/ratelimit/v3/rls_conf_ds.proto new file mode 100644 index 000000000..3aa6525c7 --- /dev/null +++ b/api/ratelimit/service/ratelimit/v3/rls_conf_ds.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package ratelimit.service.ratelimit.v3; + +import "envoy/service/discovery/v3/discovery.proto"; + +option java_package = "io.envoyproxy.ratelimit.service.config.v3"; +option java_outer_classname = "RlsConfigProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/ratelimit/service/config/v3;configv3"; +option java_generic_services = true; + +// [#protodoc-title: Rate Limit Config Discovery Service (RLS Conf DS)] + +// Return list of all rate limit configs that rate limit service should be configured with. +service RateLimitConfigDiscoveryService { + + rpc StreamRlsConfigs(stream envoy.service.discovery.v3.DiscoveryRequest) + returns (stream envoy.service.discovery.v3.DiscoveryResponse) { + } + + rpc FetchRlsConfigs(envoy.service.discovery.v3.DiscoveryRequest) + returns (envoy.service.discovery.v3.DiscoveryResponse) { + } +} diff --git a/docker-compose.yml b/docker-compose.yml index 6f1b9f0fb..1a35cca8a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -18,39 +18,20 @@ services: networks: - ratelimit-network - # minimal container that builds the ratelimit service binary and exits. 
- ratelimit-build: - image: golang:1.21.5-alpine - working_dir: /go/src/github.com/envoyproxy/ratelimit - command: go build -o /usr/local/bin/ratelimit ./src/service_cmd/main.go - volumes: - - .:/go/src/github.com/envoyproxy/ratelimit - - binary:/usr/local/bin/ - - ratelimit-client-build: - image: golang:1.21.5-alpine - working_dir: /go/src/github.com/envoyproxy/ratelimit - command: go build -o /usr/local/bin/ratelimit_client ./src/client_cmd/main.go - volumes: - - .:/go/src/github.com/envoyproxy/ratelimit - - binary:/usr/local/bin/ - ratelimit: - image: alpine:3.6 - command: > - sh -c "until test -f /usr/local/bin/ratelimit; do sleep 5; done; /usr/local/bin/ratelimit" + build: + context: . + dockerfile: Dockerfile + command: /bin/ratelimit ports: - 8080:8080 - 8081:8081 - 6070:6070 depends_on: - redis - - ratelimit-build - - ratelimit-client-build networks: - ratelimit-network volumes: - - binary:/usr/local/bin/ - ./examples:/data environment: - USE_STATSD=false @@ -63,6 +44,3 @@ services: networks: ratelimit-network: - -volumes: - binary: diff --git a/examples/xds-sotw-config-server/Dockerfile b/examples/xds-sotw-config-server/Dockerfile index 98388a721..4bba4fc9a 100644 --- a/examples/xds-sotw-config-server/Dockerfile +++ b/examples/xds-sotw-config-server/Dockerfile @@ -1,11 +1,11 @@ -FROM golang:1.21.5@sha256:672a2286da3ee7a854c3e0a56e0838918d0dbb1c18652992930293312de898a6 AS build +FROM golang:1.25.6@sha256:fc24d3881a021e7b968a4610fc024fba749f98fe5c07d4f28e6cfa14dc65a84c AS build WORKDIR /xds-server COPY . . 
RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/xds-server -v main/main.go -FROM alpine:3.16@sha256:e4cdb7d47b06ba0a062ad2a97a7d154967c8f83934594d9f2bd3efa89292996b AS final +FROM alpine:3.20@sha256:77726ef6b57ddf65bb551896826ec38bc3e53f75cdde31354fbffb4f25238ebd AS final RUN apk --no-cache add ca-certificates && apk --no-cache update COPY --from=build /go/bin/xds-server /bin/xds-server ENTRYPOINT [ "/bin/xds-server" ] diff --git a/examples/xds-sotw-config-server/go.mod b/examples/xds-sotw-config-server/go.mod index a6162ea0a..67e53db8d 100644 --- a/examples/xds-sotw-config-server/go.mod +++ b/examples/xds-sotw-config-server/go.mod @@ -3,20 +3,21 @@ module github.com/envoyproxy/ratelimit/examples/xds-sotw-config-server go 1.21.5 require ( - github.com/envoyproxy/go-control-plane v0.11.1 - google.golang.org/grpc v1.59.0 + github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b + google.golang.org/grpc v1.65.0 ) require ( + cel.dev/expr v0.15.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/protobuf v1.31.0 // indirect + github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys 
v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/protobuf v1.34.1 // indirect ) diff --git a/examples/xds-sotw-config-server/go.sum b/examples/xds-sotw-config-server/go.sum index b923eb810..6c12801f8 100644 --- a/examples/xds-sotw-config-server/go.sum +++ b/examples/xds-sotw-config-server/go.sum @@ -1,82 +1,38 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +cel.dev/expr v0.15.0 h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w= +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
-github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b h1:M0BhcNaW04UV1haQO8IFSDB64dAeiBSsTMZks/sYDcQ= +github.com/envoyproxy/go-control-plane v0.12.1-0.20240123181358-841e293a220b/go.mod h1:lFu6itz1hckLR2A3aJ+ZKf3lu8HpjTsJSsqvVF6GL6g= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795 h1:pH+U6pJP0BhxqQ4njBUjOg0++WMMvv3eByWzB+oATBY= +github.com/planetscale/vtprotobuf v0.5.1-0.20231212170721-e7d721933795/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= -google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +golang.org/x/net v0.25.0 
h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/examples/xds-sotw-config-server/resource.go b/examples/xds-sotw-config-server/resource.go index 71df63249..5792f156a 100644 --- a/examples/xds-sotw-config-server/resource.go +++ b/examples/xds-sotw-config-server/resource.go @@ -93,7 +93,8 @@ func makeRlsConfig() []types.Resource { }, Descriptors: 
[]*rls_config.RateLimitDescriptor{ { - Key: "bar", + Key: "bar", + DetailedMetric: true, RateLimit: &rls_config.RateLimitPolicy{ Unit: rls_config.RateLimitUnit_MINUTE, RequestsPerUnit: 3, diff --git a/go.mod b/go.mod index 5d1c386d1..de47ebb1d 100644 --- a/go.mod +++ b/go.mod @@ -1,59 +1,73 @@ module github.com/envoyproxy/ratelimit -go 1.21.5 +go 1.25.6 require ( - github.com/alicebob/miniredis/v2 v2.31.0 + github.com/DataDog/datadog-go/v5 v5.5.0 + github.com/alicebob/miniredis/v2 v2.33.0 github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 github.com/coocood/freecache v1.2.4 - github.com/envoyproxy/go-control-plane v0.11.1 + github.com/envoyproxy/go-control-plane v0.13.4 + github.com/envoyproxy/go-control-plane/envoy v1.32.4 + github.com/envoyproxy/go-control-plane/ratelimit v0.1.1-0.20250812085011-4cf7e8485428 + github.com/go-kit/log v0.2.1 github.com/golang/mock v1.6.0 - github.com/golang/protobuf v1.5.3 - github.com/google/uuid v1.4.0 + github.com/google/go-cmp v0.7.0 + github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/kavu/go_reuseport v1.5.0 + github.com/jpillora/backoff v1.0.0 github.com/kelseyhightower/envconfig v1.4.0 + github.com/libp2p/go-reuseport v0.4.0 github.com/lyft/goruntime v0.3.0 - github.com/lyft/gostats v0.4.12 - github.com/mediocregopher/radix/v3 v3.8.1 + github.com/lyft/gostats v0.4.14 + github.com/mediocregopher/radix/v4 v4.1.4 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/statsd_exporter v0.26.1 github.com/sirupsen/logrus v1.9.3 - github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 - go.opentelemetry.io/otel v1.21.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 
- go.opentelemetry.io/otel/sdk v1.21.0 - go.opentelemetry.io/otel/trace v1.21.0 - golang.org/x/net v0.19.0 - google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.31.0 + github.com/stretchr/testify v1.10.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 + go.opentelemetry.io/otel v1.36.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 + go.opentelemetry.io/otel/sdk v1.36.0 + go.opentelemetry.io/otel/trace v1.36.0 + golang.org/x/net v0.42.0 + google.golang.org/grpc v1.74.2 + google.golang.org/protobuf v1.36.10 gopkg.in/yaml.v2 v2.4.0 ) require ( + cel.dev/expr v0.24.0 // indirect + github.com/Microsoft/go-winio v0.5.0 // indirect github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // 
indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.5.1 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/tilinna/clock v1.0.2 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + golang.org/x/sys v0.34.0 // indirect + golang.org/x/text v0.27.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 3da22d743..88c160e27 100644 --- a/go.sum +++ b/go.sum @@ -1,34 +1,30 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= 
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis/v2 v2.31.0 h1:ObEFUNlJwoIiyjxdrYF0QIDE7qXcLc7D3WpSH4c22PU= -github.com/alicebob/miniredis/v2 v2.31.0/go.mod h1:UB/T2Uztp7MlFSDakaX1sTXUv5CASoprx0wulRT6HBg= +github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA= +github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous= github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 
v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= 
github.com/coocood/freecache v1.2.4 h1:UdR6Yz/X1HW4fZOuH0Z94KwG851GWOSknua5VUbb/5M= github.com/coocood/freecache v1.2.4/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -37,24 +33,33 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.1-0.20250812085011-4cf7e8485428 h1:AkYCu5lno5OO4HujsRKIDJ1XnqymM3saXdBoQ+lpWhk= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.1-0.20250812085011-4cf7e8485428/go.mod h1:EWppLjKDYW2tcLcDYvR1kDWCZWeMVxH/0v6vaYXo0eA= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= 
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -62,23 +67,21 @@ github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+Licev github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= 
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y= -github.com/kavu/go_reuseport v1.5.0 h1:UNuiY2OblcqAtVDE8Gsg1kZz8zbBWg907sP1ceBV+bk= -github.com/kavu/go_reuseport v1.5.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -92,63 +95,84 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/lyft/goruntime v0.3.0 h1:VLBYR4s3XazkUT8lLtq9CJrt58YmLQQumrK3ktenEkI= github.com/lyft/goruntime v0.3.0/go.mod h1:BW1gngSpMJR9P9w23BPUPdhdbUWhpirl98TQhOWWMF4= github.com/lyft/gostats v0.4.1/go.mod 
h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= -github.com/lyft/gostats v0.4.12 h1:vaQMrsY4QH9GOeJUkZ7bHm8kqS92IhHuuwh7vTQ4qyQ= -github.com/lyft/gostats v0.4.12/go.mod h1:rMGud5RRaGYMG0KPS0GAUSBBs69yFMOMYjAnmcPTaG8= -github.com/mediocregopher/radix/v3 v3.8.1 h1:rOkHflVuulFKlwsLY01/M2cM2tWCjDoETcMqKbAWu1M= -github.com/mediocregopher/radix/v3 v3.8.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/lyft/gostats v0.4.14 h1:xmP4yMfDvEKtlNZEcS2sYz0cvnps1ri337ZEEbw3ab8= +github.com/lyft/gostats v0.4.14/go.mod h1:cJWqEVL8JIewIJz/olUIios2F1q06Nc51hXejPQmBH0= +github.com/mediocregopher/radix/v4 v4.1.4 h1:Uze6DEbEAvL+VHXUEu/EDBTkUk5CLct5h3nVSGpc6Ts= +github.com/mediocregopher/radix/v4 v4.1.4/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/prometheus/client_model 
v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/statsd_exporter v0.26.1 h1:ucbIAdPmwAUcA+dU+Opok8Qt81Aw8HanlO+2N/Wjv7w= +github.com/prometheus/statsd_exporter v0.26.1/go.mod h1:XlDdjAmRmx3JVvPPYuFNUg+Ynyb5kR69iPPkQjxXFMk= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= -github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tilinna/clock v1.0.2 h1:6BO2tyAC9JbPExKH/z9zl44FLu1lImh3nDNKA0kgrkI= +github.com/tilinna/clock v1.0.2/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= 
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -175,11 +199,9 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -187,25 +209,26 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -220,32 +243,24 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 h1:W12Pwm4urIbRdGhMEg2NM9O3TWKjNcxQhs46V0ypf/k= -google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ= -google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a h1:SGktgSolFCo75dnHJF2yMvnns6jCmHFJ0vE4Vn2JKvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250528174236-200df99c418a/go.mod h1:a77HrdMjoeKbnd2jmgcWdaS++ZLZAEq3orIOAEIKiVw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 h1:MAKi5q709QWfnkkpNQ0M12hYJ1+e8qYVDyowc4U1XZM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.59.0 
h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/integration-test/Dockerfile.tester b/integration-test/Dockerfile.tester index 18979fd96..9d588530e 100644 --- a/integration-test/Dockerfile.tester +++ b/integration-test/Dockerfile.tester @@ -1,4 +1,4 @@ -FROM alpine@sha256:51b67269f354137895d43f3b3d810bfacd3945438e94dc5ac55fdac340352f48 +FROM alpine@sha256:4b7ce07002c69e8f3d704a9c5d6fd3053be500b7f1c69fc0d80990c2ad8dd412 USER root diff --git a/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh b/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh index a3851cef7..6fc8b486b 100755 --- a/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh +++ b/integration-test/scripts/value-included-in-stats-key-when-unspecified.sh @@ -24,7 +24,7 @@ if [ $? 
-eq 0 ]; then fi # Sleep a bit to allow the stats to be propagated -sleep 2 +sleep 5 # Extract the metric for the unspecified value, which shoulb be there due to the "detailed_metric" stats=$(curl -f -s statsd:9102/metrics | grep -e ratelimit_service_rate_limit_over_limit | grep unspec_unspecified_value | cut -d} -f2 | sed 's/ //g') diff --git a/src/config/config.go b/src/config/config.go index ca24d1e23..6e31883de 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -17,14 +17,18 @@ func (e RateLimitConfigError) Error() string { // Wrapper for an individual rate limit config entry which includes the defined limit and stats. type RateLimit struct { - FullKey string - Stats stats.RateLimitStats - Limit *pb.RateLimitResponse_RateLimit - Unlimited bool - ShadowMode bool - Name string - Replaces []string - IncludeValueInMetricWhenNotSpecified bool + FullKey string + Stats stats.RateLimitStats + Limit *pb.RateLimitResponse_RateLimit + Unlimited bool + ShadowMode bool + QuotaMode bool + Name string + Replaces []string + DetailedMetric bool + // ShareThresholdKeyPattern is a slice of wildcard patterns for descriptor entries + // The slice index corresponds to the descriptor entry index. + ShareThresholdKeyPattern []string } // Interface for interacting with a loaded rate limit config. 
diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 2bdbf9d43..98aa06eb9 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -26,12 +26,15 @@ type YamlRateLimit struct { } type YamlDescriptor struct { - Key string - Value string - RateLimit *YamlRateLimit `yaml:"rate_limit"` - Descriptors []YamlDescriptor - ShadowMode bool `yaml:"shadow_mode"` - IncludeMetricsForUnspecifiedValue bool `yaml:"detailed_metric"` + Key string + Value string + RateLimit *YamlRateLimit `yaml:"rate_limit"` + Descriptors []YamlDescriptor + ShadowMode bool `yaml:"shadow_mode"` + QuotaMode bool `yaml:"quota_mode"` + DetailedMetric bool `yaml:"detailed_metric"` + ValueToMetric bool `yaml:"value_to_metric"` + ShareThreshold bool `yaml:"share_threshold"` } type YamlRoot struct { @@ -40,9 +43,12 @@ type YamlRoot struct { } type rateLimitDescriptor struct { - descriptors map[string]*rateLimitDescriptor - limit *RateLimit - wildcardKeys []string + descriptors map[string]*rateLimitDescriptor + limit *RateLimit + wildcardKeys []string + valueToMetric bool + shareThreshold bool + wildcardPattern string // stores the wildcard pattern when share_threshold is true } type rateLimitDomain struct { @@ -65,9 +71,12 @@ var validKeys = map[string]bool{ "requests_per_unit": true, "unlimited": true, "shadow_mode": true, + "quota_mode": true, "name": true, "replaces": true, "detailed_metric": true, + "value_to_metric": true, + "share_threshold": true, } // Create a new rate limit config entry. @@ -77,20 +86,23 @@ var validKeys = map[string]bool{ // @param unlimited supplies whether the rate limit is unlimited // @return the new config entry. 
func NewRateLimit(requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, rlStats stats.RateLimitStats, - unlimited bool, shadowMode bool, name string, replaces []string, includeValueInMetricWhenNotSpecified bool) *RateLimit { - + unlimited bool, shadowMode bool, quotaMode bool, name string, replaces []string, detailedMetric bool, +) *RateLimit { return &RateLimit{ FullKey: rlStats.GetKey(), Stats: rlStats, Limit: &pb.RateLimitResponse_RateLimit{ RequestsPerUnit: requestsPerUnit, Unit: unit, + Name: name, }, - Unlimited: unlimited, - ShadowMode: shadowMode, - Name: name, - Replaces: replaces, - IncludeValueInMetricWhenNotSpecified: includeValueInMetricWhenNotSpecified, + Unlimited: unlimited, + ShadowMode: shadowMode, + QuotaMode: quotaMode, + Name: name, + Replaces: replaces, + DetailedMetric: detailedMetric, + ShareThresholdKeyPattern: nil, } } @@ -99,8 +111,8 @@ func (this *rateLimitDescriptor) dump() string { ret := "" if this.limit != nil { ret += fmt.Sprintf( - "%s: unit=%s requests_per_unit=%d, shadow_mode: %t\n", this.limit.FullKey, - this.limit.Limit.Unit.String(), this.limit.Limit.RequestsPerUnit, this.limit.ShadowMode) + "%s: unit=%s requests_per_unit=%d, shadow_mode: %t, quota_mode: %t\n", this.limit.FullKey, + this.limit.Limit.Unit.String(), this.limit.Limit.RequestsPerUnit, this.limit.ShadowMode, this.limit.QuotaMode) } for _, descriptor := range this.descriptors { ret += descriptor.dump() @@ -143,8 +155,7 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p if descriptorConfig.RateLimit != nil { unlimited := descriptorConfig.RateLimit.Unlimited - value, present := - pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)] + value, present := pb.RateLimitResponse_RateLimit_Unit_value[strings.ToUpper(descriptorConfig.RateLimit.Unit)] validUnit := present && value != int32(pb.RateLimitResponse_RateLimit_UNKNOWN) if unlimited { @@ -166,12 +177,12 @@ func (this 
*rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p rateLimit = NewRateLimit( descriptorConfig.RateLimit.RequestsPerUnit, pb.RateLimitResponse_RateLimit_Unit(value), - statsManager.NewStats(newParentKey), unlimited, descriptorConfig.ShadowMode, - descriptorConfig.RateLimit.Name, replaces, descriptorConfig.IncludeMetricsForUnspecifiedValue, + statsManager.NewStats(newParentKey), unlimited, descriptorConfig.ShadowMode, descriptorConfig.QuotaMode, + descriptorConfig.RateLimit.Name, replaces, descriptorConfig.DetailedMetric, ) rateLimitDebugString = fmt.Sprintf( - " ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t, shadow_mode=%t}", rateLimit.Limit.RequestsPerUnit, - rateLimit.Limit.Unit.String(), rateLimit.Unlimited, rateLimit.ShadowMode) + " ratelimit={requests_per_unit=%d, unit=%s, unlimited=%t, shadow_mode=%t, quota_mode=%t}", rateLimit.Limit.RequestsPerUnit, + rateLimit.Limit.Unit.String(), rateLimit.Unlimited, rateLimit.ShadowMode, rateLimit.QuotaMode) for _, replaces := range descriptorConfig.RateLimit.Replaces { if replaces.Name == "" { @@ -183,16 +194,38 @@ func (this *rateLimitDescriptor) loadDescriptors(config RateLimitConfigToLoad, p } } - logger.Debugf( - "loading descriptor: key=%s%s", newParentKey, rateLimitDebugString) - newDescriptor := &rateLimitDescriptor{map[string]*rateLimitDescriptor{}, rateLimit, nil} - newDescriptor.loadDescriptors(config, newParentKey+".", descriptorConfig.Descriptors, statsManager) - this.descriptors[finalKey] = newDescriptor + // Validate share_threshold can only be used with wildcards + if descriptorConfig.ShareThreshold { + if len(finalKey) == 0 || finalKey[len(finalKey)-1:] != "*" { + panic(newRateLimitConfigError( + config.Name, + fmt.Sprintf("share_threshold can only be used with wildcard values (ending with '*'), but found key '%s'", finalKey))) + } + } + + // Store wildcard pattern if share_threshold is enabled + var wildcardPattern string = "" + if descriptorConfig.ShareThreshold && 
len(finalKey) > 0 && finalKey[len(finalKey)-1:] == "*" { + wildcardPattern = finalKey + } // Preload keys ending with "*" symbol. if finalKey[len(finalKey)-1:] == "*" { this.wildcardKeys = append(this.wildcardKeys, finalKey) } + + logger.Debugf( + "loading descriptor: key=%s%s", newParentKey, rateLimitDebugString) + newDescriptor := &rateLimitDescriptor{ + descriptors: map[string]*rateLimitDescriptor{}, + limit: rateLimit, + wildcardKeys: nil, + valueToMetric: descriptorConfig.ValueToMetric, + shareThreshold: descriptorConfig.ShareThreshold, + wildcardPattern: wildcardPattern, + } + newDescriptor.loadDescriptors(config, newParentKey+".", descriptorConfig.Descriptors, statsManager) + this.descriptors[finalKey] = newDescriptor } } @@ -203,12 +236,12 @@ func validateYamlKeys(fileName string, config_map map[interface{}]interface{}) { for k, v := range config_map { if _, ok := k.(string); !ok { errorText := fmt.Sprintf("config error, key is not of type string: %v", k) - logger.Debugf(errorText) + logger.Debug(errorText) panic(newRateLimitConfigError(fileName, errorText)) } if _, ok := validKeys[k.(string)]; !ok { errorText := fmt.Sprintf("config error, unknown key '%s'", k) - logger.Debugf(errorText) + logger.Debug(errorText) panic(newRateLimitConfigError(fileName, errorText)) } switch v := v.(type) { @@ -216,7 +249,7 @@ func validateYamlKeys(fileName string, config_map map[interface{}]interface{}) { for _, e := range v { if _, ok := e.(map[interface{}]interface{}); !ok { errorText := fmt.Sprintf("config error, yaml file contains list of type other than map: %v", e) - logger.Debugf(errorText) + logger.Debug(errorText) panic(newRateLimitConfigError(fileName, errorText)) } element := e.(map[interface{}]interface{}) @@ -235,7 +268,7 @@ func validateYamlKeys(fileName string, config_map map[interface{}]interface{}) { case nil: default: errorText := "error checking config" - logger.Debugf(errorText) + logger.Debug(errorText) panic(newRateLimitConfigError(fileName, errorText)) 
} } @@ -262,7 +295,14 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) { } logger.Debugf("loading domain: %s", root.Domain) - newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil, nil}} + newDomain := &rateLimitDomain{rateLimitDescriptor{ + descriptors: map[string]*rateLimitDescriptor{}, + limit: nil, + wildcardKeys: nil, + valueToMetric: false, + shareThreshold: false, + wildcardPattern: "", + }} newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsManager) this.domains[root.Domain] = newDomain } @@ -277,13 +317,15 @@ func (this *rateLimitConfigImpl) Dump() string { } func (this *rateLimitConfigImpl) GetLimit( - ctx context.Context, domain string, descriptor *pb_struct.RateLimitDescriptor) *RateLimit { - + ctx context.Context, domain string, descriptor *pb_struct.RateLimitDescriptor, +) *RateLimit { logger.Debugf("starting get limit lookup") var rateLimit *RateLimit = nil value := this.domains[domain] if value == nil { logger.Debugf("unknown domain '%s'", domain) + domainStats := this.statsManager.NewDomainStats(domain) + domainStats.NotFound.Inc() return rateLimit } @@ -297,6 +339,7 @@ func (this *rateLimitConfigImpl) GetLimit( this.statsManager.NewStats(rateLimitKey), false, false, + false, "", []string{}, false, @@ -306,33 +349,131 @@ func (this *rateLimitConfigImpl) GetLimit( descriptorsMap := value.descriptors prevDescriptor := &value.rateLimitDescriptor + + // Build detailed metric as we traverse the list of descriptors + var detailedMetricFullKey strings.Builder + detailedMetricFullKey.WriteString(domain) + + // Build value_to_metric-enhanced metric key as we traverse + var valueToMetricFullKey strings.Builder + valueToMetricFullKey.WriteString(domain) + + // Track share_threshold patterns for entries matched via wildcard (using indexes) + // This allows share_threshold to work when wildcard has nested descriptors + var shareThresholdPatterns map[int]string + for i, 
entry := range descriptor.Entries { // First see if key_value is in the map. If that isn't in the map we look for just key // to check for a default value. finalKey := entry.Key + "_" + entry.Value + + detailedMetricFullKey.WriteString(".") + detailedMetricFullKey.WriteString(finalKey) + logger.Debugf("looking up key: %s", finalKey) nextDescriptor := descriptorsMap[finalKey] + var matchedWildcardKey string if nextDescriptor == nil && len(prevDescriptor.wildcardKeys) > 0 { for _, wildcardKey := range prevDescriptor.wildcardKeys { if strings.HasPrefix(finalKey, strings.TrimSuffix(wildcardKey, "*")) { nextDescriptor = descriptorsMap[wildcardKey] + matchedWildcardKey = wildcardKey break } } } + matchedUsingValue := nextDescriptor != nil if nextDescriptor == nil { finalKey = entry.Key logger.Debugf("looking up key: %s", finalKey) nextDescriptor = descriptorsMap[finalKey] + matchedUsingValue = false + } + + // Track share_threshold pattern when matching via wildcard, even if no rate_limit at this level + if matchedWildcardKey != "" && nextDescriptor != nil && nextDescriptor.shareThreshold && nextDescriptor.wildcardPattern != "" { + // Extract the value part from the wildcard pattern (e.g., "key_files*" -> "files*") + if shareThresholdPatterns == nil { + shareThresholdPatterns = make(map[int]string) + } + + wildcardValue := strings.TrimPrefix(nextDescriptor.wildcardPattern, entry.Key+"_") + shareThresholdPatterns[i] = wildcardValue + logger.Debugf("tracking share_threshold for entry index %d (key %s), wildcard pattern %s", i, entry.Key, wildcardValue) + } + + // Build value_to_metric metrics path for this level + valueToMetricFullKey.WriteString(".") + if nextDescriptor != nil { + // Determine the value to use for this entry + var valueToUse string + hasShareThreshold := shareThresholdPatterns[i] != "" + + if matchedWildcardKey != "" { + // Matched via wildcard + if hasShareThreshold { + // share_threshold: always use wildcard pattern with * + valueToUse = 
shareThresholdPatterns[i] + } else if nextDescriptor.valueToMetric { + // value_to_metric: use actual runtime value + valueToUse = entry.Value + } else { + // No flags: preserve wildcard pattern + valueToUse = strings.TrimPrefix(matchedWildcardKey, entry.Key+"_") + } + } else if matchedUsingValue { + // Matched explicit key+value in config (share_threshold can't apply here) + valueToUse = entry.Value + } else { + // Matched default key (no value) in config + if nextDescriptor.valueToMetric { + valueToUse = entry.Value + } + } + + // Write key and value (if any) + valueToMetricFullKey.WriteString(entry.Key) + if valueToUse != "" { + valueToMetricFullKey.WriteString("_") + valueToMetricFullKey.WriteString(valueToUse) + } + } else { + // No next descriptor found; still append something deterministic + valueToMetricFullKey.WriteString(entry.Key) } if nextDescriptor != nil && nextDescriptor.limit != nil { logger.Debugf("found rate limit: %s", finalKey) if i == len(descriptor.Entries)-1 { - rateLimit = nextDescriptor.limit + // Create a copy of the rate limit to avoid modifying the shared object + originalLimit := nextDescriptor.limit + rateLimit = &RateLimit{ + FullKey: originalLimit.FullKey, + Stats: originalLimit.Stats, + Limit: originalLimit.Limit, + Unlimited: originalLimit.Unlimited, + ShadowMode: originalLimit.ShadowMode, + QuotaMode: originalLimit.QuotaMode, + Name: originalLimit.Name, + Replaces: originalLimit.Replaces, + DetailedMetric: originalLimit.DetailedMetric, + // Initialize ShareThresholdKeyPattern with correct length, empty strings for entries without share_threshold + ShareThresholdKeyPattern: nil, + } + // Apply all tracked share_threshold patterns when we find the rate_limit + // This works whether the rate_limit is at the wildcard level or deeper + // Only entries with share_threshold will have non-empty patterns + if len(shareThresholdPatterns) > 0 { + rateLimit.ShareThresholdKeyPattern = make([]string, len(descriptor.Entries)) + } + + for idx, 
pattern := range shareThresholdPatterns { + rateLimit.ShareThresholdKeyPattern[idx] = pattern + logger.Debugf("share_threshold enabled for entry index %d, using wildcard pattern %s", idx, pattern) + } } else { logger.Debugf("request depth does not match config depth, there are more entries in the request's descriptor") } @@ -342,8 +483,11 @@ func (this *rateLimitConfigImpl) GetLimit( logger.Debugf("iterating to next level") descriptorsMap = nextDescriptor.descriptors } else { - if rateLimit != nil && rateLimit.IncludeValueInMetricWhenNotSpecified { - rateLimit = NewRateLimit(rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit, this.statsManager.NewStats(rateLimit.FullKey+"_"+entry.Value), rateLimit.Unlimited, rateLimit.ShadowMode, rateLimit.Name, rateLimit.Replaces, false) + if rateLimit != nil && rateLimit.DetailedMetric { + // Preserve ShareThresholdKeyPattern when recreating rate limit + originalShareThresholdKeyPattern := rateLimit.ShareThresholdKeyPattern + rateLimit = NewRateLimit(rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit, this.statsManager.NewStats(rateLimit.FullKey), rateLimit.Unlimited, rateLimit.ShadowMode, rateLimit.QuotaMode, rateLimit.Name, rateLimit.Replaces, rateLimit.DetailedMetric) + rateLimit.ShareThresholdKeyPattern = originalShareThresholdKeyPattern } break @@ -351,6 +495,52 @@ func (this *rateLimitConfigImpl) GetLimit( prevDescriptor = nextDescriptor } + // Replace metric with detailed metric, if leaf descriptor is detailed. 
+ // If share_threshold is enabled, always use wildcard pattern with * + if rateLimit != nil && rateLimit.DetailedMetric { + // Check if any entry has share_threshold enabled + hasShareThreshold := rateLimit.ShareThresholdKeyPattern != nil && len(rateLimit.ShareThresholdKeyPattern) > 0 + if hasShareThreshold { + // Build metric key with wildcard pattern (including *) for entries with share_threshold + var shareThresholdMetricKey strings.Builder + shareThresholdMetricKey.WriteString(domain) + for i, entry := range descriptor.Entries { + shareThresholdMetricKey.WriteString(".") + if i < len(rateLimit.ShareThresholdKeyPattern) && rateLimit.ShareThresholdKeyPattern[i] != "" { + shareThresholdMetricKey.WriteString(entry.Key) + shareThresholdMetricKey.WriteString("_") + shareThresholdMetricKey.WriteString(rateLimit.ShareThresholdKeyPattern[i]) + } else { + // Include full key_value for entries without share_threshold + shareThresholdMetricKey.WriteString(entry.Key) + if entry.Value != "" { + shareThresholdMetricKey.WriteString("_") + shareThresholdMetricKey.WriteString(entry.Value) + } + } + } + shareThresholdKey := shareThresholdMetricKey.String() + rateLimit.FullKey = shareThresholdKey + rateLimit.Stats = this.statsManager.NewStats(shareThresholdKey) + } else { + detailedKey := detailedMetricFullKey.String() + rateLimit.FullKey = detailedKey + rateLimit.Stats = this.statsManager.NewStats(detailedKey) + } + } + + // If not using detailed metric, but any value_to_metric path produced a different key, + // override stats to use the value_to_metric-enhanced key + if rateLimit != nil && !rateLimit.DetailedMetric { + enhancedKey := valueToMetricFullKey.String() + if enhancedKey != rateLimit.FullKey { + // Recreate to ensure a clean stats struct, then set to enhanced stats + originalShareThresholdKeyPattern := rateLimit.ShareThresholdKeyPattern + rateLimit = NewRateLimit(rateLimit.Limit.RequestsPerUnit, rateLimit.Limit.Unit, this.statsManager.NewStats(enhancedKey), 
rateLimit.Unlimited, rateLimit.ShadowMode, rateLimit.QuotaMode, rateLimit.Name, rateLimit.Replaces, rateLimit.DetailedMetric) + rateLimit.ShareThresholdKeyPattern = originalShareThresholdKeyPattern + } + } + return rateLimit } @@ -381,7 +571,7 @@ func ConfigFileContentToYaml(fileName, content string) *YamlRoot { err := yaml.Unmarshal([]byte(content), &any) if err != nil { errorText := fmt.Sprintf("error loading config file: %s", err.Error()) - logger.Debugf(errorText) + logger.Debug(errorText) panic(newRateLimitConfigError(fileName, errorText)) } validateYamlKeys(fileName, any) @@ -390,7 +580,7 @@ func ConfigFileContentToYaml(fileName, content string) *YamlRoot { err = yaml.Unmarshal([]byte(content), &root) if err != nil { errorText := fmt.Sprintf("error loading config file: %s", err.Error()) - logger.Debugf(errorText) + logger.Debug(errorText) panic(newRateLimitConfigError(fileName, errorText)) } @@ -403,8 +593,8 @@ func ConfigFileContentToYaml(fileName, content string) *YamlRoot { // @param mergeDomainConfigs defines whether multiple configurations referencing the same domain will be merged or rejected throwing an error. // @return a new config. 
func NewRateLimitConfigImpl( - configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig { - + configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool, +) RateLimitConfig { ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsManager, mergeDomainConfigs} for _, config := range configs { ret.loadConfig(config) @@ -416,8 +606,8 @@ func NewRateLimitConfigImpl( type rateLimitConfigLoaderImpl struct{} func (this *rateLimitConfigLoaderImpl) Load( - configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool) RateLimitConfig { - + configs []RateLimitConfigToLoad, statsManager stats.Manager, mergeDomainConfigs bool, +) RateLimitConfig { return NewRateLimitConfigImpl(configs, statsManager, mergeDomainConfigs) } diff --git a/src/config/config_xds.go b/src/config/config_xds.go index 1e772c361..f6c67ce22 100644 --- a/src/config/config_xds.go +++ b/src/config/config_xds.go @@ -16,11 +16,12 @@ func rateLimitDescriptorsPbToYaml(pb []*rls_conf_v3.RateLimitDescriptor) []YamlD descriptors := make([]YamlDescriptor, len(pb)) for i, d := range pb { descriptors[i] = YamlDescriptor{ - Key: d.Key, - Value: d.Value, - RateLimit: rateLimitPolicyPbToYaml(d.RateLimit), - Descriptors: rateLimitDescriptorsPbToYaml(d.Descriptors), - ShadowMode: d.ShadowMode, + Key: d.Key, + Value: d.Value, + RateLimit: rateLimitPolicyPbToYaml(d.RateLimit), + Descriptors: rateLimitDescriptorsPbToYaml(d.Descriptors), + ShadowMode: d.ShadowMode, + DetailedMetric: d.DetailedMetric, } } diff --git a/src/godogstats/dogstatsd_sink.go b/src/godogstats/dogstatsd_sink.go new file mode 100644 index 000000000..022e3fc1f --- /dev/null +++ b/src/godogstats/dogstatsd_sink.go @@ -0,0 +1,132 @@ +package godogstats + +import ( + "regexp" + "strconv" + "strings" + "time" + + "github.com/DataDog/datadog-go/v5/statsd" + gostats "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" +) + +type godogStatsSink struct { 
+ client *statsd.Client + config struct { + host string + port int + } + + mogrifier mogrifierMap +} + +// ensure that godogStatsSink implements gostats.Sink +var _ gostats.Sink = (*godogStatsSink)(nil) + +type goDogStatsSinkOption func(*godogStatsSink) + +func WithStatsdHost(host string) goDogStatsSinkOption { + return func(g *godogStatsSink) { + g.config.host = host + } +} + +func WithStatsdPort(port int) goDogStatsSinkOption { + return func(g *godogStatsSink) { + g.config.port = port + } +} + +// WithMogrifier adds a mogrifier to the sink. Map iteration order is randomized, to control order call multiple times. +func WithMogrifier(mogrifiers map[*regexp.Regexp]func([]string) (string, []string)) goDogStatsSinkOption { + return func(g *godogStatsSink) { + for m, h := range mogrifiers { + g.mogrifier = append(g.mogrifier, mogrifierEntry{ + matcher: m, + handler: h, + }) + } + } +} + +func WithMogrifierFromEnv(keys []string) goDogStatsSinkOption { + return func(g *godogStatsSink) { + mogrifier, err := newMogrifierMapFromEnv(keys) + if err != nil { + panic(err) + } + g.mogrifier = mogrifier + } +} + +func NewSink(opts ...goDogStatsSinkOption) (*godogStatsSink, error) { + sink := &godogStatsSink{} + for _, opt := range opts { + opt(sink) + } + client, err := statsd.New(sink.config.host+":"+strconv.Itoa(sink.config.port), statsd.WithoutClientSideAggregation()) + if err != nil { + return nil, err + } + sink.client = client + return sink, nil +} + +// separateTags separates the metric name and tags from the combined serialized metric name. +// e.g. 
given input: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345.__DEPLOY=67890" +// this should produce output: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", ["COMMIT:12345", "DEPLOY:67890"] +// Aligns to how tags are serialized here https://github.com/lyft/gostats/blob/49e70f1b7932d146fecd991be04f8e1ad235452c/internal/tags/tags.go#L335 +func separateTags(name string) (string, []string) { + const ( + prefix = ".__" + sep = "=" + ) + + // split the name and tags about the first prefix for extra tags + shortName, tagString, hasTags := strings.Cut(name, prefix) + if !hasTags { + return name, nil + } + + // split the tags at every instance of prefix + tagPairs := strings.Split(tagString, prefix) + tags := make([]string, 0, len(tagPairs)) + for _, tagPair := range tagPairs { + // split the name + value by the seperator + tagName, tagValue, isValid := strings.Cut(tagPair, sep) + if !isValid { + logger.Debugf("godogstats sink found malformed extra tag: %v, string: %v", tagPair, name) + continue + } + tags = append(tags, tagName+":"+tagValue) + } + + return shortName, tags +} + +// mogrify takes a serialized metric name as input (internal gostats format) +// and returns a metric name and list of tags (dogstatsd output format) +// the output list of tags includes any "tags" that are serialized into the metric name, +// as well as any other tags emitted by the mogrifier config +func (g *godogStatsSink) mogrify(name string) (string, []string) { + name, extraTags := separateTags(name) + name, tags := g.mogrifier.mogrify(name) + return name, append(extraTags, tags...) 
+} + +func (g *godogStatsSink) FlushCounter(name string, value uint64) { + name, tags := g.mogrify(name) + g.client.Count(name, int64(value), tags, 1.0) +} + +func (g *godogStatsSink) FlushGauge(name string, value uint64) { + name, tags := g.mogrify(name) + g.client.Gauge(name, float64(value), tags, 1.0) +} + +func (g *godogStatsSink) FlushTimer(name string, milliseconds float64) { + name, tags := g.mogrify(name) + duration := time.Duration(milliseconds) * time.Millisecond + g.client.Timing(name, duration, tags, 1.0) +} diff --git a/src/godogstats/dogstatsd_sink_test.go b/src/godogstats/dogstatsd_sink_test.go new file mode 100644 index 000000000..0e021e923 --- /dev/null +++ b/src/godogstats/dogstatsd_sink_test.go @@ -0,0 +1,101 @@ +package godogstats + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSeparateExtraTags(t *testing.T) { + tests := []struct { + name string + givenMetric string + expectOutput string + expectTags []string + }{ + { + name: "no extra tags", + givenMetric: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", + expectOutput: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", + expectTags: nil, + }, + { + name: "one extra tags", + givenMetric: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345", + expectOutput: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", + expectTags: []string{"COMMIT:12345"}, + }, + { + name: "two extra tags", + givenMetric: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345.__DEPLOY=6890", + expectOutput: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", + expectTags: []string{"COMMIT:12345", "DEPLOY:6890"}, + }, + { + name: "invalid extra tag no value", + givenMetric: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT", + expectOutput: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", + expectTags: []string{}, 
+ }, + } + + for _, tt := range tests { + actualName, actualTags := separateTags(tt.givenMetric) + + assert.Equal(t, tt.expectOutput, actualName) + assert.Equal(t, tt.expectTags, actualTags) + } +} + +func TestSinkMogrify(t *testing.T) { + g := &godogStatsSink{ + mogrifier: mogrifierMap{ + { + matcher: regexp.MustCompile(`^ratelimit\.(.*)$`), + handler: func(matches []string) (string, []string) { + return "custom." + matches[1], []string{"tag1:value1", "tag2:value2"} + }, + }, + }, + } + + tests := []struct { + name string + input string + expectedName string + expectedTags []string + }{ + { + name: "mogrify with match and extra tags", + input: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345.__DEPLOY=67890", + expectedName: "custom.service.rate_limit.mongo_cps.database_users.total_hits", + expectedTags: []string{"COMMIT:12345", "DEPLOY:67890", "tag1:value1", "tag2:value2"}, + }, + { + name: "mogrify with match without extra tags", + input: "ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", + expectedName: "custom.service.rate_limit.mongo_cps.database_users.total_hits", + expectedTags: []string{"tag1:value1", "tag2:value2"}, + }, + { + name: "extra tags with no match", + input: "foo.service.rate_limit.mongo_cps.database_users.total_hits.__COMMIT=12345.__DEPLOY=67890", + expectedName: "foo.service.rate_limit.mongo_cps.database_users.total_hits", + expectedTags: []string{"COMMIT:12345", "DEPLOY:67890"}, + }, + { + name: "no mogrification", + input: "other.metric.name", + expectedName: "other.metric.name", + expectedTags: nil, + }, + } + + for _, tt := range tests { + actualName, actualTags := g.mogrify(tt.input) + + assert.Equal(t, tt.expectedName, actualName) + assert.Equal(t, tt.expectedTags, actualTags) + } +} diff --git a/src/godogstats/mogrifier_map.go b/src/godogstats/mogrifier_map.go new file mode 100644 index 000000000..8983a7f69 --- /dev/null +++ b/src/godogstats/mogrifier_map.go @@ -0,0 +1,116 @@ +package 
godogstats + +import ( + "fmt" + "regexp" + "strconv" + + "github.com/kelseyhightower/envconfig" +) + +var varFinder = regexp.MustCompile(`\$\d+`) // matches $0, $1, etc. + +const envPrefix = "DOG_STATSD_MOGRIFIER" // prefix for environment variables + +type mogrifierEntry struct { + matcher *regexp.Regexp // the matcher determines whether a mogrifier should run on a metric at all + handler func(matches []string) (name string, tags []string) // the handler takes the list of matches, and returns metric name and list of tags +} + +// mogrifierMap is an ordered map of regular expressions to functions that mogrify a name and return tags +type mogrifierMap []mogrifierEntry + +// makePatternHandler returns a function that replaces $0, $1, etc. in the pattern with the corresponding match +func makePatternHandler(pattern string) func([]string) string { + return func(matches []string) string { + return varFinder.ReplaceAllStringFunc(pattern, func(s string) string { + i, err := strconv.Atoi(s[1:]) + if i >= len(matches) || err != nil { + // Return the original placeholder if the index is out of bounds + // or the Atoi fails, though given the varFinder regex it should + // not be possible. 
+ return s + } + return matches[i] + }) + } +} + +// newMogrifierMapFromEnv loads mogrifiers from environment variables +// keys is a list of mogrifier names to load +func newMogrifierMapFromEnv(keys []string) (mogrifierMap, error) { + mogrifiers := mogrifierMap{} + + type config struct { + Pattern string `envconfig:"PATTERN"` + Tags map[string]string `envconfig:"TAGS"` + Name string `envconfig:"NAME"` + } + + for _, mogrifier := range keys { + cfg := config{} + if err := envconfig.Process(envPrefix+"_"+mogrifier, &cfg); err != nil { + return nil, fmt.Errorf("failed to load mogrifier %s: %v", mogrifier, err) + } + + if cfg.Pattern == "" { + return nil, fmt.Errorf("no PATTERN specified for mogrifier %s", mogrifier) + } + + re, err := regexp.Compile(cfg.Pattern) + if err != nil { + return nil, fmt.Errorf("failed to compile pattern for %s: %s: %v", mogrifier, cfg.Pattern, err) + } + + if cfg.Name == "" { + return nil, fmt.Errorf("no NAME specified for mogrifier %s", mogrifier) + } + + nameHandler := makePatternHandler(cfg.Name) + tagHandlers := make(map[string]func([]string) string, len(cfg.Tags)) + for key, value := range cfg.Tags { + if key == "" { + return nil, fmt.Errorf("no key specified for tag %s for mogrifier %s", key, mogrifier) + } + tagHandlers[key] = makePatternHandler(value) + if value == "" { + return nil, fmt.Errorf("no value specified for tag %s for mogrifier %s", key, mogrifier) + } + } + + mogrifiers = append(mogrifiers, mogrifierEntry{ + matcher: re, + handler: func(matches []string) (string, []string) { + name := nameHandler(matches) + tags := make([]string, 0, len(tagHandlers)) + for tagKey, handler := range tagHandlers { + tagValue := handler(matches) + tags = append(tags, tagKey+":"+tagValue) + } + return name, tags + }, + }, + ) + + } + return mogrifiers, nil +} + +// mogrify applies the first mogrifier in the map that matches the name +func (m *mogrifierMap) mogrify(name string) (string, []string) { + if m == nil { + return name, nil + } + for 
_, mogrifier := range *m { + matches := mogrifier.matcher.FindStringSubmatch(name) + if len(matches) == 0 { + continue + } + + mogrifiedName, tags := mogrifier.handler(matches) + return mogrifiedName, tags + } + + // no mogrification + return name, nil +} diff --git a/src/godogstats/mogrifier_map_test.go b/src/godogstats/mogrifier_map_test.go new file mode 100644 index 000000000..f44d2bce3 --- /dev/null +++ b/src/godogstats/mogrifier_map_test.go @@ -0,0 +1,205 @@ +package godogstats + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/assert" +) + +func testMogrifier() mogrifierMap { + return mogrifierMap{ + { + matcher: regexp.MustCompile(`^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`), + handler: func(matches []string) (string, []string) { + name := "ratelimit.service.rate_limit." + matches[3] + tags := []string{"domain:" + matches[1], "descriptor:" + matches[2]} + return name, tags + }, + }, + } +} + +func TestMogrify(t *testing.T) { + m := testMogrifier() + // Test case 1 + name1 := "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit" + expectedMogrifiedName1 := "ratelimit.service.rate_limit.within_limit" + expectedTags1 := []string{"domain:mongo_cps", "descriptor:database_users"} + mogrifiedName1, tags1 := m.mogrify(name1) + assert.Equal(t, expectedMogrifiedName1, mogrifiedName1) + assert.Equal(t, expectedTags1, tags1) +} + +func TestEmpty(t *testing.T) { + m := mogrifierMap{} + name, tags := m.mogrify("ratelimit.service.rate_limit.mongo_cps.database_users.within_limit") + assert.Equal(t, "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", name) + assert.Empty(t, tags) +} + +func TestNil(t *testing.T) { + var m mogrifierMap + name, tags := m.mogrify("ratelimit.service.rate_limit.mongo_cps.database_users.within_limit") + assert.Equal(t, "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", name) + assert.Empty(t, tags) +} + +func TestLoadMogrifiersFromEnv(t *testing.T) { + tests := []struct 
{ + name string + envVars map[string]string + input string + expectOutput string + expectedTags []string + keys []string + }{ + { + name: "Simple replacement", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_TAG_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_TAG_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_TAG_TAGS": "domain:$1,descriptor:$2", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", + expectOutput: "ratelimit.service.rate_limit.within_limit", + expectedTags: []string{"domain:mongo_cps", "descriptor:database_users"}, + keys: []string{"TAG"}, + }, + { + name: "Out of bounds index", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_TAG_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_TAG_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_TAG_TAGS": "domain:$1,descriptor:$5", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", + expectOutput: "ratelimit.service.rate_limit.within_limit", + expectedTags: []string{"domain:mongo_cps", "descriptor:$5"}, + keys: []string{"TAG"}, + }, + { + name: "No placeholders in tags", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_TAG_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_TAG_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_TAG_TAGS": "domain:mongo_cps,descriptor:database_users", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", + expectOutput: "ratelimit.service.rate_limit.within_limit", + expectedTags: []string{"domain:mongo_cps", "descriptor:database_users"}, + keys: []string{"TAG"}, + }, + { + name: "No matches", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_TAG_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_TAG_NAME": "ratelimit.service.rate_limit.$3", + 
"DOG_STATSD_MOGRIFIER_TAG_TAGS": "domain:$1,descriptor:$4", + }, + input: "some.unmatched.metric", + expectOutput: "some.unmatched.metric", + keys: []string{"TAG"}, + }, + { + name: "Two mogrifiers: First match", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_SPECIFIC_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.foo$`, + "DOG_STATSD_MOGRIFIER_SPECIFIC_NAME": "custom.foo", + "DOG_STATSD_MOGRIFIER_SPECIFIC_TAGS": "domain:$1,descriptor:$2", + "DOG_STATSD_MOGRIFIER_WILDCARD_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_WILDCARD_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_WILDCARD_TAGS": "domain:$1,descriptor:$2", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.foo", + expectOutput: "custom.foo", + expectedTags: []string{"domain:mongo_cps", "descriptor:database_users"}, + keys: []string{"SPECIFIC", "WILDCARD"}, + }, + { + name: "Two mogrifiers: second match", + envVars: map[string]string{ + "DOG_STATSD_MOGRIFIER_SPECIFIC_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.foo$`, + "DOG_STATSD_MOGRIFIER_SPECIFIC_NAME": "custom.foo", + "DOG_STATSD_MOGRIFIER_SPECIFIC_TAGS": "domain:$1,descriptor:$2", + "DOG_STATSD_MOGRIFIER_WILDCARD_PATTERN": `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`, + "DOG_STATSD_MOGRIFIER_WILDCARD_NAME": "ratelimit.service.rate_limit.$3", + "DOG_STATSD_MOGRIFIER_WILDCARD_TAGS": "domain:$1,descriptor:$2", + }, + input: "ratelimit.service.rate_limit.mongo_cps.database_users.within_limit", + expectOutput: "ratelimit.service.rate_limit.within_limit", + expectedTags: []string{"domain:mongo_cps", "descriptor:database_users"}, + keys: []string{"SPECIFIC", "WILDCARD"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set environment variables + for key, value := range tt.envVars { + t.Setenv(key, value) + } + + mogrifiers, err := newMogrifierMapFromEnv(tt.keys) + assert.NoError(t, err) + assert.NotNil(t, 
mogrifiers) + assert.Len(t, mogrifiers, len(tt.keys)) + + name, tags := mogrifiers.mogrify(tt.input) + assert.Equal(t, tt.expectOutput, name) + assert.ElementsMatch(t, tt.expectedTags, tags) + }) + } +} + +func TestValidation(t *testing.T) { + t.Run("No settings will fail", func(t *testing.T) { + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("EmptyPattern", func(t *testing.T) { + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", "") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "ratelimit.service.rate_limit.$3") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", "domain:$1,descriptor:$2") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("EmptyName", func(t *testing.T) { + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`) + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", "domain:$1,descriptor:$2") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("EmptyTagKey", func(t *testing.T) { + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`) + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "ratelimit.service.rate_limit.$3") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", ":5") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("EmptyTagValue", func(t *testing.T) { + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`) + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "ratelimit.service.rate_limit.$3") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", "domain:$1,descriptor:") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.Error(t, err) + }) + + t.Run("Success w/ No mogrifiers", func(t *testing.T) { + _, err := newMogrifierMapFromEnv([]string{}) + assert.NoError(t, err) + }) + + t.Run("Success w/ mogrifier", func(t *testing.T) { + 
t.Setenv("DOG_STATSD_MOGRIFIER_TAG_PATTERN", `^ratelimit\.service\.rate_limit\.(.*)\.(.*)\.(.*)$`) + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_NAME", "ratelimit.service.rate_limit.$3") + t.Setenv("DOG_STATSD_MOGRIFIER_TAG_TAGS", "domain:$1,descriptor:$2") + _, err := newMogrifierMapFromEnv([]string{"TAG"}) + assert.NoError(t, err) + }) +} diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index b76366cf6..6aec1ac8a 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -26,14 +26,15 @@ type BaseRateLimiter struct { type LimitInfo struct { limit *config.RateLimit - limitBeforeIncrease uint32 - limitAfterIncrease uint32 - nearLimitThreshold uint32 - overLimitThreshold uint32 + limitBeforeIncrease uint64 + limitAfterIncrease uint64 + nearLimitThreshold uint64 + overLimitThreshold uint64 } -func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limitAfterIncrease uint32, - nearLimitThreshold uint32, overLimitThreshold uint32) *LimitInfo { +func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint64, limitAfterIncrease uint64, + nearLimitThreshold uint64, overLimitThreshold uint64, +) *LimitInfo { return &LimitInfo{ limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease, nearLimitThreshold: nearLimitThreshold, overLimitThreshold: overLimitThreshold, @@ -43,7 +44,8 @@ func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limit // Generates cache keys for given rate limit request. Each cache key is represented by a concatenation of // domain, descriptor and current timestamp. 
func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, - limits []*config.RateLimit, hitsAddend uint32) []CacheKey { + limits []*config.RateLimit, hitsAddends []uint64, +) []CacheKey { assert.Assert(len(request.Descriptors) == len(limits)) cacheKeys := make([]CacheKey, len(request.Descriptors)) now := this.timeSource.UnixNow() @@ -53,7 +55,7 @@ func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. if limits[i] != nil { - limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) + limits[i].Stats.TotalHits.Add(hitsAddends[i]) } } return cacheKeys @@ -72,14 +74,15 @@ func (this *BaseRateLimiter) IsOverLimitWithLocalCache(key string) bool { } func (this *BaseRateLimiter) IsOverLimitThresholdReached(limitInfo *LimitInfo) bool { - limitInfo.overLimitThreshold = limitInfo.limit.Limit.RequestsPerUnit + limitInfo.overLimitThreshold = uint64(limitInfo.limit.Limit.RequestsPerUnit) return limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold } // Generates response descriptor status based on cache key, over the limit with local cache, over the limit and // near the limit thresholds. Thresholds are checked in order and are mutually exclusive. 
func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo, - isOverLimitWithLocalCache bool, hitsAddend uint32) *pb.RateLimitResponse_DescriptorStatus { + isOverLimitWithLocalCache bool, hitsAddend uint64, +) *pb.RateLimitResponse_DescriptorStatus { if key == "" { return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, nil, 0) @@ -88,15 +91,15 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * isOverLimit := false if isOverLimitWithLocalCache { isOverLimit = true - limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) - limitInfo.limit.Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) + limitInfo.limit.Stats.OverLimit.Add(hitsAddend) + limitInfo.limit.Stats.OverLimitWithLocalCache.Add(hitsAddend) responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, limitInfo.limit.Limit, 0) } else { - limitInfo.overLimitThreshold = limitInfo.limit.Limit.RequestsPerUnit + limitInfo.overLimitThreshold = uint64(limitInfo.limit.Limit.RequestsPerUnit) // The nearLimitThreshold is the number of requests that can be made before hitting the nearLimitRatio. // We need to know it in both the OK and OVER_LIMIT scenarios. 
- limitInfo.nearLimitThreshold = uint32(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio))) + limitInfo.nearLimitThreshold = uint64(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio))) logger.Debugf("cache key: %s current: %d", key, limitInfo.limitAfterIncrease) if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold { isOverLimit = true @@ -120,7 +123,7 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * } } else { responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, - limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease) + limitInfo.limit.Limit, uint32(limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease)) // The limit is OK but we additionally want to know if we are near the limit. this.checkNearLimitThreshold(limitInfo, hitsAddend) @@ -140,7 +143,8 @@ func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo * } func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, - localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager) *BaseRateLimiter { + localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager, +) *BaseRateLimiter { return &BaseRateLimiter{ timeSource: timeSource, JitterRand: jitterRand, @@ -152,38 +156,38 @@ func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expira } } -func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { +func (this *BaseRateLimiter) checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint64) { // Increase over limit statistics. Because we support += behavior for increasing the limit, we need to // assess if the entire hitsAddend were over the limit. 
That is, if the limit's value before adding the // N hits was over the limit, then all the N hits were over limit. // Otherwise, only the difference between the current limit value and the over limit threshold // were over limit hits. if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { - limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) + limitInfo.limit.Stats.OverLimit.Add(hitsAddend) } else { - limitInfo.limit.Stats.OverLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold)) + limitInfo.limit.Stats.OverLimit.Add(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold) // If the limit before increase was below the over limit value, then some of the hits were // in the near limit range. - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease))) + limitInfo.limit.Stats.NearLimit.Add(limitInfo.overLimitThreshold - max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease)) } } -func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { +func (this *BaseRateLimiter) checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint64) { if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold { // Here we also need to assess which portion of the hitsAddend were in the near limit range. // If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise, // only the difference between the current limit value and the near limit threshold were near // limit hits. 
if limitInfo.limitBeforeIncrease >= limitInfo.nearLimitThreshold { - limitInfo.limit.Stats.NearLimit.Add(uint64(hitsAddend)) + limitInfo.limit.Stats.NearLimit.Add(hitsAddend) } else { - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold)) + limitInfo.limit.Stats.NearLimit.Add(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold) } } } -func (this *BaseRateLimiter) increaseShadowModeStats(isOverLimitWithLocalCache bool, limitInfo *LimitInfo, hitsAddend uint32) { +func (this *BaseRateLimiter) increaseShadowModeStats(isOverLimitWithLocalCache bool, limitInfo *LimitInfo, hitsAddend uint64) { // Increase shadow mode statistics. For the same reason as over limit stats, // if the limit value before adding the N hits over the limit, then all N hits were over limit. if isOverLimitWithLocalCache || limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { @@ -194,7 +198,8 @@ func (this *BaseRateLimiter) increaseShadowModeStats(isOverLimitWithLocalCache b } func (this *BaseRateLimiter) generateResponseDescriptorStatus(responseCode pb.RateLimitResponse_Code, - limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32) *pb.RateLimitResponse_DescriptorStatus { + limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32, +) *pb.RateLimitResponse_DescriptorStatus { if limit != nil { return &pb.RateLimitResponse_DescriptorStatus{ Code: responseCode, diff --git a/src/limiter/cache_key.go b/src/limiter/cache_key.go index 4aeab2049..8b2056fc5 100644 --- a/src/limiter/cache_key.go +++ b/src/limiter/cache_key.go @@ -46,8 +46,8 @@ func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { // @param now supplies the current unix time. // @return CacheKey struct. 
func (this *CacheKeyGenerator) GenerateCacheKey( - domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) CacheKey { - + domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64, +) CacheKey { if limit == nil { return CacheKey{ Key: "", @@ -63,10 +63,18 @@ func (this *CacheKeyGenerator) GenerateCacheKey( b.WriteString(domain) b.WriteByte('_') - for _, entry := range descriptor.Entries { + for i, entry := range descriptor.Entries { b.WriteString(entry.Key) b.WriteByte('_') - b.WriteString(entry.Value) + // If share_threshold is enabled for this entry index, use the wildcard pattern instead of the actual value + // Use entry index instead of key name to handle nested descriptors with same key names + valueToUse := entry.Value + if limit != nil && limit.ShareThresholdKeyPattern != nil && i < len(limit.ShareThresholdKeyPattern) { + if wildcardPattern := limit.ShareThresholdKeyPattern[i]; wildcardPattern != "" { + valueToUse = wildcardPattern + } + } + b.WriteString(valueToUse) b.WriteByte('_') } diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index a79451df3..c8bcd0774 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -17,7 +17,9 @@ package memcached import ( "context" + "crypto/tls" "math/rand" + "net" "strconv" "sync" "time" @@ -64,15 +66,15 @@ var _ limiter.RateLimitCache = (*rateLimitMemcacheImpl)(nil) func (this *rateLimitMemcacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, - limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { - + limits []*config.RateLimit, +) []*pb.RateLimitResponse_DescriptorStatus { logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. 
- hitsAddend := utils.Max(1, request.HitsAddend) + hitsAddends := utils.GetHitsAddends(request) // First build a list of all cache keys that we are actually going to hit. - cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) + cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddends) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) @@ -119,27 +121,27 @@ func (this *rateLimitMemcacheImpl) DoLimit( for i, cacheKey := range cacheKeys { rawMemcacheValue, ok := memcacheValues[cacheKey.Key] - var limitBeforeIncrease uint32 + var limitBeforeIncrease uint64 if ok { decoded, err := strconv.ParseInt(string(rawMemcacheValue.Value), 10, 32) if err != nil { logger.Errorf("Unexpected non-numeric value in memcached: %v", rawMemcacheValue) } else { - limitBeforeIncrease = uint32(decoded) + limitBeforeIncrease = uint64(decoded) } } - limitAfterIncrease := limitBeforeIncrease + hitsAddend + limitAfterIncrease := limitBeforeIncrease + hitsAddends[i] limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + limitInfo, isOverLimitWithLocalCache[i], hitsAddends[i]) } this.waitGroup.Add(1) - runAsync(func() { this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, uint64(hitsAddend)) }) + runAsync(func() { this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, hitsAddends) }) if AutoFlushForIntegrationTests { this.Flush() } @@ -148,14 +150,15 @@ func (this *rateLimitMemcacheImpl) DoLimit( } func (this *rateLimitMemcacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, isOverLimitWithLocalCache []bool, - limits []*config.RateLimit, hitsAddend uint64) { + limits []*config.RateLimit, hitsAddends []uint64, +) { defer this.waitGroup.Done() for i, cacheKey := range cacheKeys { if cacheKey.Key == "" || 
isOverLimitWithLocalCache[i] { continue } - _, err := this.client.Increment(cacheKey.Key, hitsAddend) + _, err := this.client.Increment(cacheKey.Key, hitsAddends[i]) if err == memcache.ErrCacheMiss { expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit) if this.expirationJitterMaxSeconds > 0 { @@ -165,13 +168,13 @@ func (this *rateLimitMemcacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, i // Need to add instead of increment. err = this.client.Add(&memcache.Item{ Key: cacheKey.Key, - Value: []byte(strconv.FormatUint(hitsAddend, 10)), + Value: []byte(strconv.FormatUint(hitsAddends[i], 10)), Expiration: int32(expirationSeconds), }) if err == memcache.ErrNotStored { // There was a race condition to do this add. We should be able to increment // now instead. - _, err := this.client.Increment(cacheKey.Key, hitsAddend) + _, err := this.client.Increment(cacheKey.Key, hitsAddends[i]) if err != nil { logger.Errorf("Failed to increment key %s after failing to add: %s", cacheKey.Key, err) continue @@ -221,12 +224,12 @@ func refreshServers(serverList *memcache.ServerList, srv string, resolver srv.Sr return nil } -func newMemcachedFromSrv(srv string, d time.Duration, resolver srv.SrvResolver) Client { +func newMemcachedFromSrv(srv string, d time.Duration, resolver srv.SrvResolver) *memcache.Client { serverList := new(memcache.ServerList) err := refreshServers(serverList, srv, resolver) if err != nil { errorText := "Unable to fetch servers from SRV" - logger.Errorf(errorText) + logger.Error(errorText) panic(MemcacheError(errorText)) } @@ -245,13 +248,22 @@ func newMemcacheFromSettings(s settings.Settings) Client { if s.MemcacheSrv != "" && len(s.MemcacheHostPort) > 0 { panic(MemcacheError("Both MEMCADHE_HOST_PORT and MEMCACHE_SRV are set")) } + var client *memcache.Client if s.MemcacheSrv != "" { logger.Debugf("Using MEMCACHE_SRV: %v", s.MemcacheSrv) - return newMemcachedFromSrv(s.MemcacheSrv, s.MemcacheSrvRefresh, new(srv.DnsSrvResolver)) + client = 
newMemcachedFromSrv(s.MemcacheSrv, s.MemcacheSrvRefresh, new(srv.DnsSrvResolver)) + } else { + logger.Debugf("Using MEMCACHE_HOST_PORT: %v", s.MemcacheHostPort) + client = memcache.New(s.MemcacheHostPort...) } - logger.Debugf("Usng MEMCACHE_HOST_PORT:: %v", s.MemcacheHostPort) - client := memcache.New(s.MemcacheHostPort...) client.MaxIdleConns = s.MemcacheMaxIdleConns + if s.MemcacheTls { + client.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) { + var td tls.Dialer + td.Config = s.MemcacheTlsConfig + return td.DialContext(ctx, network, address) + } + } return client } @@ -290,7 +302,8 @@ func runAsync(task func()) { } func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { + expirationJitterMaxSeconds int64, localCache *freecache.Cache, statsManager stats.Manager, nearLimitRatio float32, cacheKeyPrefix string, +) limiter.RateLimitCache { return &rateLimitMemcacheImpl{ client: client, timeSource: timeSource, @@ -303,7 +316,8 @@ func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRan } func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand, - localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager) limiter.RateLimitCache { + localCache *freecache.Cache, scope gostats.Scope, statsManager stats.Manager, +) limiter.RateLimitCache { return NewRateLimitCacheImpl( CollectStats(newMemcacheFromSettings(s), scope.Scope("memcache")), timeSource, diff --git a/src/provider/cert_provider.go b/src/provider/cert_provider.go new file mode 100644 index 000000000..79349489c --- /dev/null +++ b/src/provider/cert_provider.go @@ -0,0 +1,108 @@ +package provider + +import ( + "crypto/tls" + "path/filepath" + "sync" + + "github.com/lyft/goruntime/loader" 
+ gostats "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" + + "github.com/envoyproxy/ratelimit/src/settings" +) + +// CertProvider will watch certDirectory for changes via goruntime/loader and reload the cert and key files +type CertProvider struct { + settings settings.Settings + runtime loader.IFace + runtimeUpdateEvent chan int + rootStore gostats.Store + certLock sync.RWMutex + cert *tls.Certificate + certDirectory string + certFile string + keyFile string +} + +// GetCertificateFunc returns a function compatible with tls.Config.GetCertificate, fetching the current certificate +func (p *CertProvider) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + return func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + p.certLock.RLock() + defer p.certLock.RUnlock() + return p.cert, nil + } +} + +func (p *CertProvider) watch() { + p.runtime.AddUpdateCallback(p.runtimeUpdateEvent) + + go func() { + for { + logger.Debugf("CertProvider: waiting for runtime update") + <-p.runtimeUpdateEvent + logger.Debugf("CertProvider: got runtime update and reloading config") + p.reloadCert() + } + }() +} + +// reloadCert loads the cert and key files and updates the tls.Certificate in memory +func (p *CertProvider) reloadCert() { + tlsKeyPair, err := tls.LoadX509KeyPair(p.certFile, p.keyFile) + if err != nil { + logger.Errorf("CertProvider failed to load TLS key pair (%s, %s): %v", p.certFile, p.keyFile, err) + // panic in case there is no cert already loaded as this would mean starting up without TLS + if p.cert == nil { + logger.Fatalf("CertProvider failed to load any certificate, exiting.") + } + return // keep the old cert if we have one + } + p.certLock.Lock() + defer p.certLock.Unlock() + p.cert = &tlsKeyPair + logger.Infof("CertProvider reloaded cert from (%s, %s)", p.certFile, p.keyFile) +} + +// setupRuntime sets up the goruntime loader to watch the certDirectory +// Will panic if it fails to set up the loader +func (p *CertProvider) 
setupRuntime() { + var err error + + // runtimePath is the parent folder of certPath + runtimePath := filepath.Dir(p.certDirectory) + // runtimeSubdirectory is the name of the folder to watch, containing the certs + runtimeSubdirectory := filepath.Base(p.certDirectory) + + p.runtime, err = loader.New2( + runtimePath, + runtimeSubdirectory, + p.rootStore.ScopeWithTags("certs", p.settings.ExtraTags), + &loader.DirectoryRefresher{}, + loader.IgnoreDotFiles) + if err != nil { + logger.Fatalf("Failed to set up goruntime loader: %v", err) + } +} + +// NewCertProvider creates a new CertProvider +// Will panic if it fails to set up gruntime or fails to load the initial certificate +func NewCertProvider(settings settings.Settings, rootStore gostats.Store, certFile, keyFile string) *CertProvider { + certDirectory := filepath.Dir(certFile) + if certDirectory != filepath.Dir(keyFile) { + logger.Fatalf("certFile and keyFile must be in the same directory") + } + p := &CertProvider{ + settings: settings, + runtimeUpdateEvent: make(chan int), + rootStore: rootStore, + certDirectory: certDirectory, + certFile: certFile, + keyFile: keyFile, + } + p.setupRuntime() + // Initially load the certificate (or panic) + p.reloadCert() + go p.watch() + return p +} diff --git a/src/provider/xds_grpc_sotw_provider.go b/src/provider/xds_grpc_sotw_provider.go index fcacc212d..4a5d0621e 100644 --- a/src/provider/xds_grpc_sotw_provider.go +++ b/src/provider/xds_grpc_sotw_provider.go @@ -4,13 +4,14 @@ import ( "context" "fmt" "strings" + "time" "google.golang.org/grpc/metadata" corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/envoyproxy/go-control-plane/pkg/resource/v3" - "github.com/golang/protobuf/ptypes/any" grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" + "github.com/jpillora/backoff" logger "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -67,6 +68,12 @@ func (p *XdsGrpcSotwProvider) Stop() { func (p 
*XdsGrpcSotwProvider) initXdsClient() { logger.Info("Starting xDS client connection for rate limit configurations") conn := p.initializeAndWatch() + b := &backoff.Backoff{ + Min: p.settings.XdsClientBackoffInitialInterval, + Max: p.settings.XdsClientBackoffMaxInterval, + Factor: p.settings.XdsClientBackoffRandomFactor, + Jitter: p.settings.XdsClientBackoffJitter, + } for retryEvent := range p.connectionRetryChannel { if conn != nil { @@ -76,10 +83,18 @@ func (p *XdsGrpcSotwProvider) initXdsClient() { logger.Info("Stopping xDS client watch for rate limit configurations") break } + d := p.getJitteredExponentialBackOffDuration(b) + logger.Debugf("Sleeping for %s using exponential backoff\n", d) + time.Sleep(d) conn = p.initializeAndWatch() } } +func (p *XdsGrpcSotwProvider) getJitteredExponentialBackOffDuration(b *backoff.Backoff) time.Duration { + logger.Debugf("Retry attempt# %f", b.Attempt()) + return b.Duration() +} + func (p *XdsGrpcSotwProvider) initializeAndWatch() *grpc.ClientConn { conn, err := p.getGrpcConnection() if err != nil { @@ -99,11 +114,8 @@ func (p *XdsGrpcSotwProvider) watchConfigs() { resp, err := p.adsClient.Fetch() if err != nil { logger.Errorf("Failed to receive configuration from xDS Management Server: %s", err.Error()) - if sotw.IsConnError(err) { - p.retryGrpcConn() - return - } - p.adsClient.Nack(err.Error()) + p.retryGrpcConn() + return } else { logger.Tracef("Response received from xDS Management Server: %v", resp) p.sendConfigs(resp.Resources) @@ -114,13 +126,23 @@ func (p *XdsGrpcSotwProvider) watchConfigs() { func (p *XdsGrpcSotwProvider) getGrpcConnection() (*grpc.ClientConn, error) { backOff := grpc_retry.BackoffLinearWithJitter(p.settings.ConfigGrpcXdsServerConnectRetryInterval, 0.5) logger.Infof("Dialing xDS Management Server: '%s'", p.settings.ConfigGrpcXdsServerUrl) - return grpc.Dial( - p.settings.ConfigGrpcXdsServerUrl, + grpcOptions := []grpc.DialOption{ p.getGrpcTransportCredentials(), grpc.WithBlock(), 
grpc.WithStreamInterceptor( grpc_retry.StreamClientInterceptor(grpc_retry.WithBackoff(backOff)), - )) + ), + } + maxRecvMsgSize := p.settings.XdsClientGrpcOptionsMaxMsgSizeInBytes + if maxRecvMsgSize != 0 { + logger.Infof("Setting xDS gRPC max receive message size to %d bytes", maxRecvMsgSize) + grpcOptions = append(grpcOptions, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxRecvMsgSize))) + } + return grpc.Dial( + p.settings.ConfigGrpcXdsServerUrl, + grpcOptions..., + ) } func (p *XdsGrpcSotwProvider) getGrpcTransportCredentials() grpc.DialOption { @@ -136,7 +158,7 @@ func (p *XdsGrpcSotwProvider) getGrpcTransportCredentials() grpc.DialOption { return grpc.WithTransportCredentials(credentials.NewTLS(configGrpcXdsTlsConfig)) } -func (p *XdsGrpcSotwProvider) sendConfigs(resources []*any.Any) { +func (p *XdsGrpcSotwProvider) sendConfigs(resources []*anypb.Any) { defer func() { if e := recover(); e != nil { p.configUpdateEventChan <- &ConfigUpdateEventImpl{err: e} diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 0b0a45b4a..21db8b6d2 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,6 +1,7 @@ package redis import ( + "io" "math/rand" "github.com/coocood/freecache" @@ -12,15 +13,20 @@ import ( "github.com/envoyproxy/ratelimit/src/utils" ) -func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, statsManager stats.Manager) limiter.RateLimitCache { +func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, statsManager stats.Manager) (limiter.RateLimitCache, io.Closer) { + closer := &utils.MultiCloser{} var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, 
s.RedisPerSecondAuth, s.RedisPerSecondSocketType, - s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv) + s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPerSecondPipelineWindow, s.RedisPerSecondPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv, s.RedisPerSecondTimeout, + s.RedisPerSecondPoolOnEmptyBehavior, s.RedisPerSecondSentinelAuth) + closer.Closers = append(closer.Closers, perSecondPool) } otherPool := NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisSocketType, s.RedisType, s.RedisUrl, s.RedisPoolSize, - s.RedisPipelineWindow, s.RedisPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv) + s.RedisPipelineWindow, s.RedisPipelineLimit, s.RedisTlsConfig, s.RedisHealthCheckActiveConnection, srv, s.RedisTimeout, + s.RedisPoolOnEmptyBehavior, s.RedisSentinelAuth) + closer.Closers = append(closer.Closers, otherPool) return NewFixedRateLimitCacheImpl( otherPool, @@ -33,5 +39,5 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca s.CacheKeyPrefix, statsManager, s.StopCacheKeyIncrementWhenOverlimit, - ) + ), closer } diff --git a/src/redis/driver.go b/src/redis/driver.go index 7ffc0c7b7..8f52df42d 100644 --- a/src/redis/driver.go +++ b/src/redis/driver.go @@ -1,6 +1,6 @@ package redis -import "github.com/mediocregopher/radix/v3" +import "github.com/mediocregopher/radix/v4" // Errors that may be raised during config parsing. type RedisError string @@ -41,9 +41,13 @@ type Client interface { // NumActiveConns return number of active connections, used in testing. NumActiveConns() int +} - // ImplicitPipeliningEnabled return true if implicit pipelining is enabled. - ImplicitPipeliningEnabled() bool +// PipelineAction represents a single action in the pipeline along with its key. 
+// The key is used for grouping commands in cluster mode. +type PipelineAction struct { + Action radix.Action + Key string } -type Pipeline []radix.CmdAction +type Pipeline []PipelineAction diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index f68adddae..50cf44667 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -1,14 +1,16 @@ package redis import ( + "context" "crypto/tls" "fmt" + "net" "strings" "time" stats "github.com/lyft/gostats" - "github.com/mediocregopher/radix/v3" - "github.com/mediocregopher/radix/v3/trace" + "github.com/mediocregopher/radix/v4" + "github.com/mediocregopher/radix/v4/trace" logger "github.com/sirupsen/logrus" "github.com/envoyproxy/ratelimit/src/server" @@ -42,7 +44,7 @@ func poolTrace(ps *poolStats, healthCheckActiveConnection bool, srv server.Serve } } } else { - fmt.Println("creating redis connection error :", newConn.Err) + logger.Errorf("creating redis connection error : %v", newConn.Err) } }, ConnClosed: func(_ trace.PoolConnClosed) { @@ -58,10 +60,17 @@ func poolTrace(ps *poolStats, healthCheckActiveConnection bool, srv server.Serve } } +// redisClient is an interface that abstracts radix Client, Cluster, and Sentinel +// All of these types have Do(context.Context, Action) and Close() methods +type redisClient interface { + Do(context.Context, radix.Action) error + Close() error +} + type clientImpl struct { - client radix.Client - stats poolStats - implicitPipelining bool + client redisClient + stats poolStats + isCluster bool } func checkError(err error) { @@ -70,66 +79,148 @@ func checkError(err error) { } } -func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisType, url string, poolSize int, - pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config, healthCheckActiveConnection bool, srv server.Server) Client { - maskedUrl := utils.MaskCredentialsInUrl(url) - logger.Warnf("connecting to redis on %s with pool size %d", maskedUrl, poolSize) +// 
createDialer creates a radix.Dialer with timeout, TLS, and auth configuration +// targetName is used for logging to identify the connection target (e.g., URL, "sentinel(url)") +func createDialer(timeout time.Duration, useTls bool, tlsConfig *tls.Config, auth string, targetName string) radix.Dialer { + var netDialer net.Dialer + if timeout > 0 { + netDialer.Timeout = timeout + } - df := func(network, addr string) (radix.Conn, error) { - var dialOpts []radix.DialOpt + dialer := radix.Dialer{ + NetDialer: &netDialer, + } - if useTls { - dialOpts = append(dialOpts, radix.DialUseTLS(tlsConfig)) + // Setup TLS if needed + if useTls { + tlsNetDialer := tls.Dialer{ + NetDialer: &netDialer, + Config: tlsConfig, } - - if auth != "" { - user, pass, found := strings.Cut(auth, ":") - if found { - logger.Warnf("enabling authentication to redis on %s with user %s", maskedUrl, user) - dialOpts = append(dialOpts, radix.DialAuthUser(user, pass)) - } else { - logger.Warnf("enabling authentication to redis on %s without user", maskedUrl) - dialOpts = append(dialOpts, radix.DialAuthPass(auth)) - } + dialer.NetDialer = &tlsNetDialer + if targetName != "" { + logger.Warnf("enabling TLS to redis %s", targetName) } + } - return radix.Dial(network, addr, dialOpts...) 
+ // Setup auth if provided + if auth != "" { + user, pass, found := strings.Cut(auth, ":") + if found { + logger.Warnf("enabling authentication to redis %s with user %s", targetName, user) + dialer.AuthUser = user + dialer.AuthPass = pass + } else { + logger.Warnf("enabling authentication to redis %s without user", targetName) + dialer.AuthPass = auth + } } + return dialer +} + +func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisType, url string, poolSize int, + pipelineWindow time.Duration, pipelineLimit int, tlsConfig *tls.Config, healthCheckActiveConnection bool, srv server.Server, + timeout time.Duration, poolOnEmptyBehavior string, sentinelAuth string, +) Client { + maskedUrl := utils.MaskCredentialsInUrl(url) + logger.Warnf("connecting to redis on %s with pool size %d", maskedUrl, poolSize) + + // Create Dialer for connecting to Redis + dialer := createDialer(timeout, useTls, tlsConfig, auth, maskedUrl) + stats := newPoolStats(scope) - opts := []radix.PoolOpt{radix.PoolConnFunc(df), radix.PoolWithTrace(poolTrace(&stats, healthCheckActiveConnection, srv))} + // Create PoolConfig + poolConfig := radix.PoolConfig{ + Dialer: dialer, + Size: poolSize, + Trace: poolTrace(&stats, healthCheckActiveConnection, srv), + } + + // Determine pipeline mode based on Redis type: + // - Cluster: uses grouped pipeline (same-key commands batched together) + // - Single/Sentinel: uses explicit pipeline (all commands batched together) + isCluster := strings.ToLower(redisType) == "cluster" + + // pipelineLimit parameter is deprecated and ignored in radix v4. + if pipelineLimit > 0 { + logger.Warnf("REDIS_PIPELINE_LIMIT=%d is deprecated and has no effect in radix v4. 
Write buffering is controlled solely by REDIS_PIPELINE_WINDOW.", pipelineLimit) + } + + // Set WriteFlushInterval for cluster mode (grouped pipeline uses auto buffering) + if isCluster && pipelineWindow > 0 { + poolConfig.Dialer.WriteFlushInterval = pipelineWindow + logger.Debugf("Cluster mode: setting WriteFlushInterval to %v", pipelineWindow) + } - implicitPipelining := true - if pipelineWindow == 0 && pipelineLimit == 0 { - implicitPipelining = false - } else { - opts = append(opts, radix.PoolPipelineWindow(pipelineWindow, pipelineLimit)) + // IMPORTANT: radix v4 pool behavior changes from v3 + // + // v4 uses a FIXED pool size and BLOCKS when all connections are in use. + // This is the same as v3's WAIT behavior. + // + // v3 CREATE and ERROR behaviors are NOT supported in v4: + // - v3 WAIT → v4 supported (blocks until connection available) + // - v3 CREATE → v4 NOT SUPPORTED (would block instead of creating overflow connections) + // - v3 ERROR → v4 NOT SUPPORTED (would block instead of failing fast) + // + // Migration requirements: + // - Remove REDIS_POOL_ON_EMPTY_BEHAVIOR setting if set to CREATE or ERROR + // - Use WAIT or leave unset (WAIT is default) + // - Consider increasing REDIS_POOL_SIZE if you previously relied on CREATE + // - Use context timeouts to prevent indefinite blocking: + // ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + // defer cancel() + // client.Do(ctx, cmd) + switch strings.ToUpper(poolOnEmptyBehavior) { + case "WAIT": + logger.Warnf("Redis pool %s: WAIT is default in radix v4 (blocks until connection available)", maskedUrl) + case "CREATE": + // v3 CREATE created overflow connections when pool was full + // v4 does NOT support this - fail fast to prevent unexpected blocking behavior + panic(RedisError("REDIS_POOL_ON_EMPTY_BEHAVIOR=CREATE is not supported in radix v4. Pool will block instead of creating overflow connections. 
Remove this setting or set to WAIT, and consider increasing REDIS_POOL_SIZE.")) + case "ERROR": + // v3 ERROR failed fast when pool was full + // v4 does NOT support this - fail fast to prevent unexpected blocking behavior + panic(RedisError("REDIS_POOL_ON_EMPTY_BEHAVIOR=ERROR is not supported in radix v4. Pool will block instead of failing fast. Remove this setting or set to WAIT, and use context timeouts for fail-fast behavior.")) + default: + logger.Warnf("Redis pool %s: using v4 default (fixed size=%d, blocks when full)", maskedUrl, poolSize) } - logger.Debugf("Implicit pipelining enabled: %v", implicitPipelining) - poolFunc := func(network, addr string) (radix.Client, error) { - return radix.NewPool(network, addr, poolSize, opts...) + poolFunc := func(ctx context.Context, network, addr string) (radix.Client, error) { + return poolConfig.New(ctx, network, addr) } - var client radix.Client + var client redisClient var err error + ctx := context.Background() + switch strings.ToLower(redisType) { case "single": - client, err = poolFunc(redisSocketType, url) + logger.Warnf("Creating single with urls %v", url) + client, err = poolFunc(ctx, redisSocketType, url) case "cluster": urls := strings.Split(url, ",") - if !implicitPipelining { - panic(RedisError("Implicit Pipelining must be enabled to work with Redis Cluster Mode. 
Set values for REDIS_PIPELINE_WINDOW or REDIS_PIPELINE_LIMIT to enable implicit pipelining")) - } logger.Warnf("Creating cluster with urls %v", urls) - client, err = radix.NewCluster(urls, radix.ClusterPoolFunc(poolFunc)) + clusterConfig := radix.ClusterConfig{ + PoolConfig: poolConfig, + } + client, err = clusterConfig.New(ctx, urls) case "sentinel": urls := strings.Split(url, ",") if len(urls) < 2 { panic(RedisError("Expected master name and a list of urls for the sentinels, in the format: ,,...,")) } - client, err = radix.NewSentinel(urls[0], urls[1:], radix.SentinelPoolFunc(poolFunc)) + + // Create sentinel dialer (may use different auth from Redis master/replica) + // sentinelAuth is for Sentinel nodes, auth is for Redis master/replica + sentinelDialer := createDialer(timeout, useTls, tlsConfig, sentinelAuth, fmt.Sprintf("sentinel(%s)", maskedUrl)) + + sentinelConfig := radix.SentinelConfig{ + PoolConfig: poolConfig, + SentinelDialer: sentinelDialer, + } + client, err = sentinelConfig.New(ctx, urls[0], urls[1:]) default: panic(RedisError("Unrecognized redis type " + redisType)) } @@ -138,20 +229,25 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth, redisSocketType, redisT // Check if connection is good var pingResponse string - checkError(client.Do(radix.Cmd(&pingResponse, "PING"))) + checkError(client.Do(ctx, radix.Cmd(&pingResponse, "PING"))) if pingResponse != "PONG" { checkError(fmt.Errorf("connecting redis error: %s", pingResponse)) } return &clientImpl{ - client: client, - stats: stats, - implicitPipelining: implicitPipelining, + client: client, + stats: stats, + isCluster: isCluster, } } func (c *clientImpl) DoCmd(rcv interface{}, cmd, key string, args ...interface{}) error { - return c.client.Do(radix.FlatCmd(rcv, cmd, key, args...)) + ctx := context.Background() + // Combine key and args into a single slice + allArgs := make([]interface{}, 0, 1+len(args)) + allArgs = append(allArgs, key) + allArgs = append(allArgs, args...) 
+ return c.client.Do(ctx, radix.FlatCmd(rcv, cmd, allArgs...)) } func (c *clientImpl) Close() error { @@ -163,22 +259,67 @@ func (c *clientImpl) NumActiveConns() int { } func (c *clientImpl) PipeAppend(pipeline Pipeline, rcv interface{}, cmd, key string, args ...interface{}) Pipeline { - return append(pipeline, radix.FlatCmd(rcv, cmd, key, args...)) + // Combine key and args into a single slice + allArgs := make([]interface{}, 0, 1+len(args)) + allArgs = append(allArgs, key) + allArgs = append(allArgs, args...) + return append(pipeline, PipelineAction{ + Action: radix.FlatCmd(rcv, cmd, allArgs...), + Key: key, + }) } func (c *clientImpl) PipeDo(pipeline Pipeline) error { - if c.implicitPipelining { - for _, action := range pipeline { - if err := c.client.Do(action); err != nil { + ctx := context.Background() + if c.isCluster { + // Cluster mode: group commands by key and execute each group as a pipeline. + // This ensures INCRBY + EXPIRE for the same key are pipelined together (same slot), + // reducing round-trips from 2 to 1 per key. + return c.executeGroupedPipeline(ctx, pipeline) + } + + // Single/Sentinel mode: batch all commands in a single pipeline. + p := radix.NewPipeline() + for _, pipelineAction := range pipeline { + p.Append(pipelineAction.Action) + } + return c.client.Do(ctx, p) +} + +// executeGroupedPipeline groups pipeline actions by key and executes each group +// as a separate pipeline. This allows same-key commands (like INCRBY + EXPIRE) +// to be pipelined together even in cluster mode. 
+func (c *clientImpl) executeGroupedPipeline(ctx context.Context, pipeline Pipeline) error { + // Group actions by key, preserving first-occurrence order + var groups [][]radix.Action + keyToIndex := make(map[string]int) + + for _, pa := range pipeline { + if idx, exists := keyToIndex[pa.Key]; exists { + groups[idx] = append(groups[idx], pa.Action) + } else { + keyToIndex[pa.Key] = len(groups) + groups = append(groups, []radix.Action{pa.Action}) + } + } + + // Execute each group + for _, actions := range groups { + if len(actions) == 1 { + if err := c.client.Do(ctx, actions[0]); err != nil { + return err + } + } else { + // Multiple commands for same key: pipeline them together + p := radix.NewPipeline() + for _, action := range actions { + p.Append(action) + } + if err := c.client.Do(ctx, p); err != nil { return err } } - return nil } - return c.client.Do(radix.Pipeline(pipeline...)) -} - -func (c *clientImpl) ImplicitPipeliningEnabled() bool { - return c.implicitPipelining + return nil } diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index 4ec34b3d1..b1f290de3 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -32,119 +32,132 @@ type fixedRateLimitCacheImpl struct { baseRateLimiter *limiter.BaseRateLimiter } -func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) { +func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend uint64, result *uint64, expirationSeconds int64) { *pipeline = client.PipeAppend(*pipeline, result, "INCRBY", key, hitsAddend) *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) } -func pipelineAppendtoGet(client Client, pipeline *Pipeline, key string, result *uint32) { +func pipelineAppendtoGet(client Client, pipeline *Pipeline, key string, result *uint64) { *pipeline = client.PipeAppend(*pipeline, result, "GET", key) } +func (this *fixedRateLimitCacheImpl) 
getHitsAddend(hitsAddend uint64, isCacheKeyOverlimit, isCacheKeyNearlimit, + isNearLimt bool, +) uint64 { + // If stopCacheKeyIncrementWhenOverlimit is false, then we always increment the cache key. + if !this.stopCacheKeyIncrementWhenOverlimit { + return hitsAddend + } + + // If stopCacheKeyIncrementWhenOverlimit is true, and one of the keys is over limit, then + // we do not increment the cache key. + if isCacheKeyOverlimit { + return 0 + } + + // If stopCacheKeyIncrementWhenOverlimit is true, and none of the keys are over limit, then + // we check if any of the keys are near limit. If none of the keys are near limit, + // then we increment the cache key. + if !isCacheKeyNearlimit { + return hitsAddend + } + + // If stopCacheKeyIncrementWhenOverlimit is true, and some of the keys are near limit, then + // we only increment the cache key if the key is near limit. + if isNearLimt { + return hitsAddend + } + + return 0 +} + func (this *fixedRateLimitCacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, - limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { - + limits []*config.RateLimit, +) []*pb.RateLimitResponse_DescriptorStatus { logger.Debugf("starting cache lookup") - // request.HitsAddend could be 0 (default value) if not specified by the caller in the RateLimit request. - hitsAddend := utils.Max(1, request.HitsAddend) + hitsAddends := utils.GetHitsAddends(request) // First build a list of all cache keys that we are actually going to hit. 
- cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) + cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddends) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) - results := make([]uint32, len(request.Descriptors)) - currentCount := make([]uint32, len(request.Descriptors)) + results := make([]uint64, len(request.Descriptors)) + currentCount := make([]uint64, len(request.Descriptors)) var pipeline, perSecondPipeline, pipelineToGet, perSecondPipelineToGet Pipeline - hitsAddendForRedis := hitsAddend overlimitIndexes := make([]bool, len(request.Descriptors)) nearlimitIndexes := make([]bool, len(request.Descriptors)) isCacheKeyOverlimit := false + isCacheKeyNearlimit := false + + // Check if any of the keys are already over the limit in cache. + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + continue + } + + // Check if key is over the limit in local cache. + if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { + if limits[i].ShadowMode { + logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) + } else { + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + } + isCacheKeyOverlimit = true + isOverLimitWithLocalCache[i] = true + overlimitIndexes[i] = true + } + } - if this.stopCacheKeyIncrementWhenOverlimit { - // Check if any of the keys are reaching to the over limit in redis cache. + // If none of the keys are over limit in local cache and the stopCacheKeyIncrementWhenOverlimit is true, + // then we check if any of the keys are near limit in redis cache. + if this.stopCacheKeyIncrementWhenOverlimit && !isCacheKeyOverlimit { for i, cacheKey := range cacheKeys { if cacheKey.Key == "" { continue } - // Check if key is over the limit in local cache. 
- if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { - if limits[i].ShadowMode { - logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) - } else { - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + if this.perSecondClient != nil && cacheKey.PerSecond { + if perSecondPipelineToGet == nil { + perSecondPipelineToGet = Pipeline{} } - isOverLimitWithLocalCache[i] = true - hitsAddendForRedis = 0 - overlimitIndexes[i] = true - isCacheKeyOverlimit = true - continue + pipelineAppendtoGet(this.perSecondClient, &perSecondPipelineToGet, cacheKey.Key, ¤tCount[i]) } else { - if this.perSecondClient != nil && cacheKey.PerSecond { - if perSecondPipelineToGet == nil { - perSecondPipelineToGet = Pipeline{} - } - pipelineAppendtoGet(this.perSecondClient, &perSecondPipelineToGet, cacheKey.Key, ¤tCount[i]) - } else { - if pipelineToGet == nil { - pipelineToGet = Pipeline{} - } - pipelineAppendtoGet(this.client, &pipelineToGet, cacheKey.Key, ¤tCount[i]) + if pipelineToGet == nil { + pipelineToGet = Pipeline{} } + pipelineAppendtoGet(this.client, &pipelineToGet, cacheKey.Key, ¤tCount[i]) } } - // Only if none of the cache keys exceed the limit, call Redis to check whether the cache keys are becoming overlimited. - if len(cacheKeys) > 1 && !isCacheKeyOverlimit { - if pipelineToGet != nil { - checkError(this.client.PipeDo(pipelineToGet)) - } - if perSecondPipelineToGet != nil { - checkError(this.perSecondClient.PipeDo(perSecondPipelineToGet)) - } - - for i, cacheKey := range cacheKeys { - if cacheKey.Key == "" { - continue - } - // Now fetch the pipeline. 
- limitBeforeIncrease := currentCount[i] - limitAfterIncrease := limitBeforeIncrease + hitsAddend - - limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) - - if this.baseRateLimiter.IsOverLimitThresholdReached(limitInfo) { - hitsAddendForRedis = 0 - nearlimitIndexes[i] = true - } - } + if pipelineToGet != nil { + checkError(this.client.PipeDo(pipelineToGet)) } - } else { - // Check if any of the keys are reaching to the over limit in redis cache. + if perSecondPipelineToGet != nil { + checkError(this.perSecondClient.PipeDo(perSecondPipelineToGet)) + } + for i, cacheKey := range cacheKeys { if cacheKey.Key == "" { continue } + // Now fetch the pipeline. + limitBeforeIncrease := currentCount[i] + limitAfterIncrease := limitBeforeIncrease + hitsAddends[i] - // Check if key is over the limit in local cache. - if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { - if limits[i].ShadowMode { - logger.Debugf("Cache key %s would be rate limited but shadow mode is enabled on this rule", cacheKey.Key) - } else { - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) - } - isOverLimitWithLocalCache[i] = true - overlimitIndexes[i] = true - continue + limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) + + if this.baseRateLimiter.IsOverLimitThresholdReached(limitInfo) { + nearlimitIndexes[i] = true + isCacheKeyNearlimit = true } } } - // Now, actually setup the pipeline, skipping empty cache keys. + // Now, actually setup the pipeline to increase the usage of cache key, skipping empty cache keys. 
for i, cacheKey := range cacheKeys { if cacheKey.Key == "" || overlimitIndexes[i] { continue @@ -162,20 +175,14 @@ func (this *fixedRateLimitCacheImpl) DoLimit( if perSecondPipeline == nil { perSecondPipeline = Pipeline{} } - if nearlimitIndexes[i] { - pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) - } else { - pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddendForRedis, &results[i], expirationSeconds) - } + pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, this.getHitsAddend(hitsAddends[i], + isCacheKeyOverlimit, isCacheKeyNearlimit, nearlimitIndexes[i]), &results[i], expirationSeconds) } else { if pipeline == nil { pipeline = Pipeline{} } - if nearlimitIndexes[i] { - pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) - } else { - pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddendForRedis, &results[i], expirationSeconds) - } + pipelineAppend(this.client, &pipeline, cacheKey.Key, this.getHitsAddend(hitsAddends[i], isCacheKeyOverlimit, + isCacheKeyNearlimit, nearlimitIndexes[i]), &results[i], expirationSeconds) } } @@ -201,12 +208,12 @@ func (this *fixedRateLimitCacheImpl) DoLimit( for i, cacheKey := range cacheKeys { limitAfterIncrease := results[i] - limitBeforeIncrease := limitAfterIncrease - hitsAddend + limitBeforeIncrease := limitAfterIncrease - hitsAddends[i] limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + limitInfo, isOverLimitWithLocalCache[i], hitsAddends[i]) } @@ -218,7 +225,8 @@ func (this *fixedRateLimitCacheImpl) Flush() {} func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, 
localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string, statsManager stats.Manager, - stopCacheKeyIncrementWhenOverlimit bool) limiter.RateLimitCache { + stopCacheKeyIncrementWhenOverlimit bool, +) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, diff --git a/src/server/health.go b/src/server/health.go index 7a2f9cb70..244a760e7 100644 --- a/src/server/health.go +++ b/src/server/health.go @@ -97,7 +97,7 @@ func (hc *HealthChecker) Fail(componentName string) error { hc.grpc.SetServingStatus(hc.name, healthpb.HealthCheckResponse_NOT_SERVING) } else { errorText := fmt.Sprintf("Invalid component: %s", componentName) - logger.Errorf(errorText) + logger.Error(errorText) return errors.New(errorText) } return nil @@ -118,7 +118,7 @@ func (hc *HealthChecker) Ok(componentName string) error { } } else { errorText := fmt.Sprintf("Invalid component: %s", componentName) - logger.Errorf(errorText) + logger.Error(errorText) return errors.New(errorText) } diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 85c636b7a..e9402da0a 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -25,8 +25,7 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/gorilla/mux" - reuseport "github.com/kavu/go_reuseport" - "github.com/lyft/goruntime/loader" + "github.com/libp2p/go-reuseport" gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "google.golang.org/grpc" @@ -50,20 +49,28 @@ type serverDebugListener struct { listener net.Listener } +type grpcListenType int + +const ( + tcp grpcListenType = 0 + unixDomainSocket grpcListenType = 1 +) + type server struct { - httpAddress string - grpcAddress string - debugAddress string - router *mux.Router - grpcServer *grpc.Server - store gostats.Store - scope gostats.Scope - provider provider.RateLimitConfigProvider - runtime loader.IFace - 
debugListener serverDebugListener - httpServer *http.Server - listenerMu sync.Mutex - health *HealthChecker + httpAddress string + grpcAddress string + grpcListenType grpcListenType + debugAddress string + router *mux.Router + grpcServer *grpc.Server + store gostats.Store + scope gostats.Scope + provider provider.RateLimitConfigProvider + debugListener serverDebugListener + httpServer *http.Server + listenerMu sync.Mutex + health *HealthChecker + grpcCertProvider *provider.CertProvider } func (server *server) AddDebugHttpEndpoint(path string, help string, handler http.HandlerFunc) { @@ -197,9 +204,20 @@ func (server *server) Start() { func (server *server) startGrpc() { logger.Warnf("Listening for gRPC on '%s'", server.grpcAddress) - lis, err := reuseport.Listen("tcp", server.grpcAddress) + var lis net.Listener + var err error + + switch server.grpcListenType { + case tcp: + lis, err = reuseport.Listen("tcp", server.grpcAddress) + case unixDomainSocket: + lis, err = net.Listen("unix", server.grpcAddress) + default: + logger.Fatalf("Invalid gRPC listen type %v", server.grpcListenType) + } + if err != nil { - logger.Fatalf("Failed to listen for gRPC: %v", err) + logger.Fatalf("Failed to listen for gRPC on '%s': %v", server.grpcAddress, err) } server.grpcServer.Serve(lis) } @@ -223,6 +241,14 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc ret := new(server) + // setup stats + ret.store = statsManager.GetStatsStore() + ret.scope = ret.store.ScopeWithTags(name, s.ExtraTags) + ret.store.AddStatGenerator(gostats.NewRuntimeStats(ret.scope.Scope("go"))) + if localCache != nil { + ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) + } + keepaliveOpt := grpc.KeepaliveParams(keepalive.ServerParameters{ MaxConnectionAge: s.GrpcMaxConnectionAge, MaxConnectionAgeGrace: s.GrpcMaxConnectionAgeGrace, @@ -237,6 +263,10 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc 
} if s.GrpcServerUseTLS { grpcServerTlsConfig := s.GrpcServerTlsConfig + ret.grpcCertProvider = provider.NewCertProvider(s, ret.store, s.GrpcServerTlsCert, s.GrpcServerTlsKey) + // Remove the static certificates and use the provider via the GetCertificate function + grpcServerTlsConfig.Certificates = nil + grpcServerTlsConfig.GetCertificate = ret.grpcCertProvider.GetCertificateFunc() // Verify client SAN if provided if s.GrpcClientTlsSAN != "" { grpcServerTlsConfig.VerifyPeerCertificate = verifyClient(grpcServerTlsConfig.ClientCAs, s.GrpcClientTlsSAN) @@ -247,16 +277,14 @@ func newServer(s settings.Settings, name string, statsManager stats.Manager, loc // setup listen addresses ret.httpAddress = net.JoinHostPort(s.Host, strconv.Itoa(s.Port)) - ret.grpcAddress = net.JoinHostPort(s.GrpcHost, strconv.Itoa(s.GrpcPort)) - ret.debugAddress = net.JoinHostPort(s.DebugHost, strconv.Itoa(s.DebugPort)) - - // setup stats - ret.store = statsManager.GetStatsStore() - ret.scope = ret.store.ScopeWithTags(name, s.ExtraTags) - ret.store.AddStatGenerator(gostats.NewRuntimeStats(ret.scope.Scope("go"))) - if localCache != nil { - ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) + if s.GrpcUds != "" { + ret.grpcAddress = s.GrpcUds + ret.grpcListenType = unixDomainSocket + } else { + ret.grpcAddress = net.JoinHostPort(s.GrpcHost, strconv.Itoa(s.GrpcPort)) + ret.grpcListenType = tcp } + ret.debugAddress = net.JoinHostPort(s.DebugHost, strconv.Itoa(s.DebugPort)) // setup config provider ret.provider = getProviderImpl(s, statsManager, ret.store) diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index e918d9bb3..559b49533 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -10,6 +10,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "google.golang.org/protobuf/types/known/structpb" "github.com/envoyproxy/ratelimit/src/settings" 
"github.com/envoyproxy/ratelimit/src/stats" @@ -17,6 +18,7 @@ import ( "github.com/envoyproxy/ratelimit/src/utils" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + ratelimitv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -33,23 +35,25 @@ var tracer = otel.Tracer("ratelimit") type RateLimitServiceServer interface { pb.RateLimitServiceServer - GetCurrentConfig() (config.RateLimitConfig, bool) + GetCurrentConfig() (config.RateLimitConfig, bool, bool) SetConfig(updateEvent provider.ConfigUpdateEvent, healthyWithAtLeastOneConfigLoad bool) } type service struct { - configLock sync.RWMutex - configUpdateEvent <-chan provider.ConfigUpdateEvent - config config.RateLimitConfig - cache limiter.RateLimitCache - stats stats.ServiceStats - health *server.HealthChecker - customHeadersEnabled bool - customHeaderLimitHeader string - customHeaderRemainingHeader string - customHeaderResetHeader string - customHeaderClock utils.TimeSource - globalShadowMode bool + configLock sync.RWMutex + configUpdateEvent <-chan provider.ConfigUpdateEvent + config config.RateLimitConfig + cache limiter.RateLimitCache + stats stats.ServiceStats + health *server.HealthChecker + customHeadersEnabled bool + customHeaderLimitHeader string + customHeaderRemainingHeader string + customHeaderResetHeader string + customHeaderClock utils.TimeSource + globalShadowMode bool + globalQuotaMode bool + responseDynamicMetadataEnabled bool } func (this *service) SetConfig(updateEvent provider.ConfigUpdateEvent, healthyWithAtLeastOneConfigLoad bool) { @@ -84,6 +88,8 @@ func (this *service) SetConfig(updateEvent provider.ConfigUpdateEvent, healthyWi rlSettings := settings.NewSettings() this.globalShadowMode = rlSettings.GlobalShadowMode + this.globalQuotaMode = rlSettings.GlobalQuotaMode + this.responseDynamicMetadataEnabled = 
rlSettings.ResponseDynamicMetadata if rlSettings.RateLimitResponseHeadersEnabled { this.customHeadersEnabled = true @@ -177,14 +183,17 @@ func (this *service) constructLimitsToCheck(request *pb.RateLimitRequest, ctx co const MaxUint32 = uint32(1<<32 - 1) func (this *service) shouldRateLimitWorker( - ctx context.Context, request *pb.RateLimitRequest) *pb.RateLimitResponse { - + ctx context.Context, request *pb.RateLimitRequest, +) *pb.RateLimitResponse { checkServiceErr(request.Domain != "", "rate limit domain must not be empty") checkServiceErr(len(request.Descriptors) != 0, "rate limit descriptor list must not be empty") - snappedConfig, globalShadowMode := this.GetCurrentConfig() + snappedConfig, globalShadowMode, globalQuotaMode := this.GetCurrentConfig() limitsToCheck, isUnlimited := this.constructLimitsToCheck(request, ctx, snappedConfig) + assert.Assert(len(limitsToCheck) == len(isUnlimited)) + assert.Assert(len(limitsToCheck) == len(request.Descriptors)) + responseDescriptorStatuses := this.cache.DoLimit(ctx, request, limitsToCheck) assert.Assert(len(limitsToCheck) == len(responseDescriptorStatuses)) @@ -196,6 +205,9 @@ func (this *service) shouldRateLimitWorker( minLimitRemaining := MaxUint32 var minimumDescriptor *pb.RateLimitResponse_DescriptorStatus = nil + // Track quota mode violations for metadata + var quotaModeViolations []int + for i, descriptorStatus := range responseDescriptorStatuses { // Keep track of the descriptor closest to hit the ratelimit if this.customHeadersEnabled && @@ -213,10 +225,23 @@ func (this *service) shouldRateLimitWorker( } else { response.Statuses[i] = descriptorStatus if descriptorStatus.Code == pb.RateLimitResponse_OVER_LIMIT { - finalCode = descriptorStatus.Code - - minimumDescriptor = descriptorStatus - minLimitRemaining = 0 + // Check if this limit is in quota mode (individual or global) + isQuotaMode := globalQuotaMode || (limitsToCheck[i] != nil && limitsToCheck[i].QuotaMode) + + if isQuotaMode { + // In quota mode: 
track the violation for metadata but keep response as OK + quotaModeViolations = append(quotaModeViolations, i) + response.Statuses[i] = &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: descriptorStatus.CurrentLimit, + LimitRemaining: descriptorStatus.LimitRemaining, + } + } else { + // Normal rate limit: set final code to OVER_LIMIT + finalCode = descriptorStatus.Code + minimumDescriptor = descriptorStatus + minLimitRemaining = 0 + } } } } @@ -236,10 +261,93 @@ func (this *service) shouldRateLimitWorker( this.stats.GlobalShadowMode.Inc() } + // If response dynamic metadata is enabled, attach it to the response. + if this.responseDynamicMetadataEnabled { + response.DynamicMetadata = ratelimitToMetadata(request, quotaModeViolations, limitsToCheck) + } + response.OverallCode = finalCode return response } +func ratelimitToMetadata(req *pb.RateLimitRequest, quotaModeViolations []int, limitsToCheck []*config.RateLimit) *structpb.Struct { + fields := make(map[string]*structpb.Value) + + // Domain + fields["domain"] = structpb.NewStringValue(req.Domain) + + // Descriptors + descriptorsValues := make([]*structpb.Value, 0, len(req.Descriptors)) + for _, descriptor := range req.Descriptors { + s := descriptorToStruct(descriptor) + if s == nil { + continue + } + descriptorsValues = append(descriptorsValues, structpb.NewStructValue(s)) + } + fields["descriptors"] = structpb.NewListValue(&structpb.ListValue{ + Values: descriptorsValues, + }) + + // HitsAddend + if hitsAddend := req.GetHitsAddend(); hitsAddend != 0 { + fields["hitsAddend"] = structpb.NewNumberValue(float64(hitsAddend)) + } + + // Quota mode information + if len(quotaModeViolations) > 0 { + violationValues := make([]*structpb.Value, len(quotaModeViolations)) + for i, violationIndex := range quotaModeViolations { + violationValues[i] = structpb.NewNumberValue(float64(violationIndex)) + } + fields["quotaModeViolations"] = structpb.NewListValue(&structpb.ListValue{ + Values: 
violationValues, + }) + } + + // Check if any limits have quota mode enabled + quotaModeEnabled := false + for _, limit := range limitsToCheck { + if limit != nil && limit.QuotaMode { + quotaModeEnabled = true + break + } + } + fields["quotaModeEnabled"] = structpb.NewBoolValue(quotaModeEnabled) + + return &structpb.Struct{Fields: fields} +} + +func descriptorToStruct(descriptor *ratelimitv3.RateLimitDescriptor) *structpb.Struct { + if descriptor == nil { + return nil + } + + fields := make(map[string]*structpb.Value) + + // Entries + entriesValues := make([]*structpb.Value, 0, len(descriptor.Entries)) + for _, entry := range descriptor.Entries { + val := fmt.Sprintf("%s=%s", entry.GetKey(), entry.GetValue()) + entriesValues = append(entriesValues, structpb.NewStringValue(val)) + } + fields["entries"] = structpb.NewListValue(&structpb.ListValue{ + Values: entriesValues, + }) + + // Limit + if descriptor.GetLimit() != nil { + fields["limit"] = structpb.NewStringValue(descriptor.Limit.String()) + } + + // HitsAddend + if hitsAddend := descriptor.GetHitsAddend(); hitsAddend != nil { + fields["hitsAddend"] = structpb.NewNumberValue(float64(hitsAddend.GetValue())) + } + + return &structpb.Struct{Fields: fields} +} + func (this *service) rateLimitLimitHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { // Limit header only provides the mandatory part from the spec, the actual limit // the optional quota policy is currently not provided @@ -258,8 +366,8 @@ func (this *service) rateLimitRemainingHeader(descriptor *pb.RateLimitResponse_D } func (this *service) rateLimitResetHeader( - descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { - + descriptor *pb.RateLimitResponse_DescriptorStatus, +) *core.HeaderValue { return &core.HeaderValue{ Key: this.customHeaderResetHeader, Value: strconv.FormatInt(utils.CalculateReset(&descriptor.CurrentLimit.Unit, this.customHeaderClock).GetSeconds(), 10), @@ -268,8 +376,8 @@ func (this *service) 
rateLimitResetHeader( func (this *service) ShouldRateLimit( ctx context.Context, - request *pb.RateLimitRequest) (finalResponse *pb.RateLimitResponse, finalError error) { - + request *pb.RateLimitRequest, +) (finalResponse *pb.RateLimitResponse, finalError error) { // Generate trace _, span := tracer.Start(ctx, "ShouldRateLimit Execution", trace.WithAttributes( @@ -285,7 +393,8 @@ func (this *service) ShouldRateLimit( return } - logger.Debugf("caught error during call") + logger.Debugf("caught error during call: %v", err) + finalResponse = nil switch t := err.(type) { case redis.RedisError: @@ -304,20 +413,20 @@ func (this *service) ShouldRateLimit( }() response := this.shouldRateLimitWorker(ctx, request) - logger.Debugf("returning normal response") + logger.Debugf("returning normal response: %+v", response) return response, nil } -func (this *service) GetCurrentConfig() (config.RateLimitConfig, bool) { +func (this *service) GetCurrentConfig() (config.RateLimitConfig, bool, bool) { this.configLock.RLock() defer this.configLock.RUnlock() - return this.config, this.globalShadowMode + return this.config, this.globalShadowMode, this.globalQuotaMode } func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitConfigProvider, statsManager stats.Manager, - health *server.HealthChecker, clock utils.TimeSource, shadowMode, forceStart bool, healthyWithAtLeastOneConfigLoad bool) RateLimitServiceServer { - + health *server.HealthChecker, clock utils.TimeSource, shadowMode, forceStart bool, healthyWithAtLeastOneConfigLoad bool, +) RateLimitServiceServer { newService := &service{ configLock: sync.RWMutex{}, configUpdateEvent: configProvider.ConfigUpdateEvent(), @@ -326,6 +435,7 @@ func NewService(cache limiter.RateLimitCache, configProvider provider.RateLimitC stats: statsManager.NewServiceStats(), health: health, globalShadowMode: shadowMode, + globalQuotaMode: false, customHeaderClock: clock, } diff --git a/src/service/ratelimit_test.go 
b/src/service/ratelimit_test.go new file mode 100644 index 000000000..0f43b6fb0 --- /dev/null +++ b/src/service/ratelimit_test.go @@ -0,0 +1,204 @@ +package ratelimit + +import ( + "testing" + + ratelimitv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/structpb" + + "github.com/envoyproxy/ratelimit/src/config" +) + +func TestRatelimitToMetadata(t *testing.T) { + cases := []struct { + name string + req *pb.RateLimitRequest + quotaModeViolations []int + limitsToCheck []*config.RateLimit + expected string + }{ + { + name: "Single descriptor with single entry, no quota violations", + req: &pb.RateLimitRequest{ + Domain: "fake-domain", + Descriptors: []*ratelimitv3.RateLimitDescriptor{ + { + Entries: []*ratelimitv3.RateLimitDescriptor_Entry{ + { + Key: "key1", + Value: "val1", + }, + }, + }, + }, + }, + quotaModeViolations: nil, + limitsToCheck: []*config.RateLimit{nil}, + expected: `{ + "descriptors": [ + { + "entries": [ + "key1=val1" + ] + } + ], + "domain": "fake-domain", + "quotaModeEnabled": false +}`, + }, + { + name: "Single descriptor with quota mode violation", + req: &pb.RateLimitRequest{ + Domain: "quota-domain", + Descriptors: []*ratelimitv3.RateLimitDescriptor{ + { + Entries: []*ratelimitv3.RateLimitDescriptor_Entry{ + { + Key: "quota_key", + Value: "quota_val", + }, + }, + }, + }, + }, + quotaModeViolations: []int{0}, + limitsToCheck: []*config.RateLimit{ + { + QuotaMode: true, + }, + }, + expected: `{ + "descriptors": [ + { + "entries": [ + "quota_key=quota_val" + ] + } + ], + "domain": "quota-domain", + "quotaModeEnabled": true, + "quotaModeViolations": [0] +}`, + }, + { + name: "Multiple descriptors with mixed quota violations", + req: 
&pb.RateLimitRequest{ + Domain: "mixed-domain", + Descriptors: []*ratelimitv3.RateLimitDescriptor{ + { + Entries: []*ratelimitv3.RateLimitDescriptor_Entry{ + { + Key: "regular_key", + Value: "regular_val", + }, + }, + }, + { + Entries: []*ratelimitv3.RateLimitDescriptor_Entry{ + { + Key: "quota_key", + Value: "quota_val", + }, + }, + }, + { + Entries: []*ratelimitv3.RateLimitDescriptor_Entry{ + { + Key: "another_quota", + Value: "another_val", + }, + }, + }, + }, + }, + quotaModeViolations: []int{1, 2}, + limitsToCheck: []*config.RateLimit{ + { + QuotaMode: false, + }, + { + QuotaMode: true, + }, + { + QuotaMode: true, + }, + }, + expected: `{ + "descriptors": [ + { + "entries": [ + "regular_key=regular_val" + ] + }, + { + "entries": [ + "quota_key=quota_val" + ] + }, + { + "entries": [ + "another_quota=another_val" + ] + } + ], + "domain": "mixed-domain", + "quotaModeEnabled": true, + "quotaModeViolations": [1, 2] +}`, + }, + { + name: "Request with hits addend", + req: &pb.RateLimitRequest{ + Domain: "addend-domain", + HitsAddend: 5, + Descriptors: []*ratelimitv3.RateLimitDescriptor{ + { + Entries: []*ratelimitv3.RateLimitDescriptor_Entry{ + { + Key: "test_key", + Value: "test_val", + }, + }, + }, + }, + }, + quotaModeViolations: []int{0}, + limitsToCheck: []*config.RateLimit{ + { + QuotaMode: true, + }, + }, + expected: `{ + "descriptors": [ + { + "entries": [ + "test_key=test_val" + ] + } + ], + "domain": "addend-domain", + "hitsAddend": 5, + "quotaModeEnabled": true, + "quotaModeViolations": [0] +}`, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := ratelimitToMetadata(tc.req, tc.quotaModeViolations, tc.limitsToCheck) + expected := &structpb.Struct{} + err := protojson.Unmarshal([]byte(tc.expected), expected) + require.NoError(t, err) + + if diff := cmp.Diff(got, expected, protocmp.Transform()); diff != "" { + t.Errorf("diff: %s", diff) + } + }) + } +} diff --git a/src/service_cmd/runner/runner.go 
b/src/service_cmd/runner/runner.go index f59ce4458..f062d15fc 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -9,37 +9,77 @@ import ( "sync" "time" - "github.com/envoyproxy/ratelimit/src/metrics" - "github.com/envoyproxy/ratelimit/src/stats" - "github.com/envoyproxy/ratelimit/src/trace" - - gostats "github.com/lyft/gostats" - "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - + gostats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" + "github.com/envoyproxy/ratelimit/src/godogstats" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/memcached" + "github.com/envoyproxy/ratelimit/src/metrics" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/server" ratelimit "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/stats" + "github.com/envoyproxy/ratelimit/src/stats/prom" + "github.com/envoyproxy/ratelimit/src/trace" "github.com/envoyproxy/ratelimit/src/utils" ) type Runner struct { - statsManager stats.Manager - settings settings.Settings - srv server.Server - mu sync.Mutex + statsManager stats.Manager + settings settings.Settings + srv server.Server + mu sync.Mutex + ratelimitCloser io.Closer } func NewRunner(s settings.Settings) Runner { + var store gostats.Store + + switch { + case s.DisableStats: + logger.Info("Stats disabled") + store = gostats.NewStore(gostats.NewNullSink(), false) + case s.UseDogStatsd: + if s.UseStatsd || s.UsePrometheus { + logger.Fatalf("Error: unable to use more than one stats sink at the same time. 
Set one of USE_DOG_STATSD, USE_STATSD, USE_PROMETHEUS.") + } + sink, err := godogstats.NewSink( + godogstats.WithStatsdHost(s.StatsdHost), + godogstats.WithStatsdPort(s.StatsdPort), + godogstats.WithMogrifierFromEnv(s.UseDogStatsdMogrifiers)) + if err != nil { + logger.Fatalf("Failed to create dogstatsd sink: %v", err) + } + logger.Info("Stats initialized for dogstatsd") + store = gostats.NewStore(sink, false) + case s.UseStatsd: + if s.UseDogStatsd || s.UsePrometheus { + logger.Fatalf("Error: unable to use more than one stats sink at the same time. Set one of USE_DOG_STATSD, USE_STATSD, USE_PROMETHEUS.") + } + logger.Info("Stats initialized for statsd") + store = gostats.NewStore(gostats.NewTCPStatsdSink(gostats.WithStatsdHost(s.StatsdHost), gostats.WithStatsdPort(s.StatsdPort)), false) + case s.UsePrometheus: + if s.UseDogStatsd || s.UseStatsd { + logger.Fatalf("Error: unable to use more than one stats sink at the same time. Set one of USE_DOG_STATSD, USE_STATSD, USE_PROMETHEUS.") + } + logger.Info("Stats initialized for Prometheus") + store = gostats.NewStore(prom.NewPrometheusSink(prom.WithAddr(s.PrometheusAddr), + prom.WithPath(s.PrometheusPath), prom.WithMapperYamlPath(s.PrometheusMapperYaml)), false) + default: + logger.Info("Stats initialized for stdout") + store = gostats.NewStore(gostats.NewLoggingSink(), false) + } + + logger.Infof("Stats flush interval: %s", s.StatsFlushInterval) + + go store.Start(time.NewTicker(s.StatsFlushInterval)) + return Runner{ - statsManager: stats.NewStatManager(gostats.NewDefaultStore(), s), + statsManager: stats.NewStatManager(store, s), settings: s, } } @@ -48,7 +88,7 @@ func (runner *Runner) GetStatsStore() gostats.Store { return runner.statsManager.GetStatsStore() } -func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache, statsManager stats.Manager) limiter.RateLimitCache { +func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache, statsManager stats.Manager) 
(limiter.RateLimitCache, io.Closer) { switch s.BackendType { case "redis", "": return redis.NewRateLimiterCacheImplFromSettings( @@ -67,7 +107,7 @@ func createLimiter(srv server.Server, s settings.Settings, localCache *freecache rand.New(utils.NewLockedSource(time.Now().Unix())), localCache, srv.Scope(), - statsManager) + statsManager), &utils.MultiCloser{} // memcache client can't be closed default: logger.Fatalf("Invalid setting for BackendType: %s", s.BackendType) panic("This line should not be reachable") @@ -115,8 +155,11 @@ func (runner *Runner) Run() { runner.srv = srv runner.mu.Unlock() + limiter, limiterCloser := createLimiter(srv, s, localCache, runner.statsManager) + runner.ratelimitCloser = limiterCloser + service := ratelimit.NewService( - createLimiter(srv, s, localCache, runner.statsManager), + limiter, srv.Provider(), runner.statsManager, srv.HealthChecker(), @@ -130,7 +173,7 @@ func (runner *Runner) Run() { "/rlconfig", "print out the currently loaded configuration for debugging", func(writer http.ResponseWriter, request *http.Request) { - if current, _ := service.GetCurrentConfig(); current != nil { + if current, _, _ := service.GetCurrentConfig(); current != nil { io.WriteString(writer, current.Dump()) } }) @@ -152,4 +195,8 @@ func (runner *Runner) Stop() { if srv != nil { srv.Stop() } + + if runner.ratelimitCloser != nil { + _ = runner.ratelimitCloser.Close() + } } diff --git a/src/settings/settings.go b/src/settings/settings.go index 097047819..7ad2d80df 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -21,6 +21,9 @@ type Settings struct { DebugPort int `envconfig:"DEBUG_PORT" default:"6070"` // GRPC server settings + // If GrpcUds is set we'll listen on the specified unix domain socket address + // rather then GrpcHost:GrpcPort. e.g. 
GrpcUds=/tmp/ratelimit.sock + GrpcUds string `envconfig:"GRPC_UDS" default:""` GrpcHost string `envconfig:"GRPC_HOST" default:"0.0.0.0"` GrpcPort int `envconfig:"GRPC_PORT" default:"8081"` // GrpcServerTlsConfig configures grpc for the server @@ -69,11 +72,28 @@ type Settings struct { // GrpcClientTlsSAN is the SAN to validate from the client cert during mTLS auth ConfigGrpcXdsServerTlsSAN string `envconfig:"CONFIG_GRPC_XDS_SERVER_TLS_SAN" default:""` + // xDS client backoff configuration + XdsClientBackoffInitialInterval time.Duration `envconfig:"XDS_CLIENT_BACKOFF_INITIAL_INTERVAL" default:"10s"` + XdsClientBackoffMaxInterval time.Duration `envconfig:"XDS_CLIENT_BACKOFF_MAX_INTERVAL" default:"60s"` + XdsClientBackoffRandomFactor float64 `envconfig:"XDS_CLIENT_BACKOFF_RANDOM_FACTOR" default:"0.5"` + XdsClientBackoffJitter bool `envconfig:"XDS_CLIENT_BACKOFF_JITTER" default:"true"` + + // xDS client gRPC options + XdsClientGrpcOptionsMaxMsgSizeInBytes int `envconfig:"XDS_CLIENT_MAX_MSG_SIZE_IN_BYTES" default:""` + // Stats-related settings - UseStatsd bool `envconfig:"USE_STATSD" default:"true"` - StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` - StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` - ExtraTags map[string]string `envconfig:"EXTRA_TAGS" default:""` + UseDogStatsd bool `envconfig:"USE_DOG_STATSD" default:"false"` + UseDogStatsdMogrifiers []string `envconfig:"USE_DOG_STATSD_MOGRIFIERS" default:""` + UseStatsd bool `envconfig:"USE_STATSD" default:"true"` + StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` + StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` + ExtraTags map[string]string `envconfig:"EXTRA_TAGS" default:""` + StatsFlushInterval time.Duration `envconfig:"STATS_FLUSH_INTERVAL" default:"10s"` + DisableStats bool `envconfig:"DISABLE_STATS" default:"false"` + UsePrometheus bool `envconfig:"USE_PROMETHEUS" default:"false"` + PrometheusAddr string `envconfig:"PROMETHEUS_ADDR" default:":9090"` + 
PrometheusPath string `envconfig:"PROMETHEUS_PATH" default:"/metrics"` + PrometheusMapperYaml string `envconfig:"PROMETHEUS_MAPPER_YAML" default:""` // Settings for rate limit configuration RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` @@ -117,12 +137,17 @@ type Settings struct { RedisTlsCACert string `envconfig:"REDIS_TLS_CACERT" default:""` RedisTlsSkipHostnameVerification bool `envconfig:"REDIS_TLS_SKIP_HOSTNAME_VERIFICATION" default:"false"` - // RedisPipelineWindow sets the duration after which internal pipelines will be flushed. - // If window is zero then implicit pipelining will be disabled. Radix use 150us for the - // default value, see https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L278. + // RedisPipelineWindow sets the WriteFlushInterval for radix v4 connections. + // This controls how often buffered writes are flushed to the network connection. + // When set to a non-zero value, radix v4 will buffer multiple concurrent write operations + // and flush them together, reducing system calls and improving throughput. + // If zero, each write is flushed immediately (no buffering). + // Required for Redis Cluster mode. Recommended value: 150us-500us. + // See: https://pkg.go.dev/github.com/mediocregopher/radix/v4#Dialer RedisPipelineWindow time.Duration `envconfig:"REDIS_PIPELINE_WINDOW" default:"0"` - // RedisPipelineLimit sets maximum number of commands that can be pipelined before flushing. - // If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. + // RedisPipelineLimit is DEPRECATED and unused in radix v4. + // This setting has no effect. Radix v4 does not support explicit pipeline size limits. + // Write buffering is controlled solely by RedisPipelineWindow (WriteFlushInterval). 
RedisPipelineLimit int `envconfig:"REDIS_PIPELINE_LIMIT" default:"0"` RedisPerSecond bool `envconfig:"REDIS_PERSECOND" default:"false"` RedisPerSecondSocketType string `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"` @@ -131,14 +156,40 @@ type Settings struct { RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"` RedisPerSecondAuth string `envconfig:"REDIS_PERSECOND_AUTH" default:""` RedisPerSecondTls bool `envconfig:"REDIS_PERSECOND_TLS" default:"false"` - // RedisPerSecondPipelineWindow sets the duration after which internal pipelines will be flushed for per second redis. + // RedisSentinelAuth is the password for authenticating to Redis Sentinel nodes (not the Redis master/replica). + // This is separate from RedisAuth which is used for authenticating to the Redis master/replica nodes. + // If empty, no authentication will be attempted when connecting to Sentinel nodes. + RedisSentinelAuth string `envconfig:"REDIS_SENTINEL_AUTH" default:""` + // RedisPerSecondSentinelAuth is the password for authenticating to per-second Redis Sentinel nodes. + // This is separate from RedisPerSecondAuth which is used for authenticating to the Redis master/replica nodes. + // If empty, no authentication will be attempted when connecting to per-second Sentinel nodes. + RedisPerSecondSentinelAuth string `envconfig:"REDIS_PERSECOND_SENTINEL_AUTH" default:""` + // RedisPerSecondPipelineWindow sets the WriteFlushInterval for per-second redis connections. // See comments of RedisPipelineWindow for details. RedisPerSecondPipelineWindow time.Duration `envconfig:"REDIS_PERSECOND_PIPELINE_WINDOW" default:"0"` - // RedisPerSecondPipelineLimit sets maximum number of commands that can be pipelined before flushing for per second redis. + // RedisPerSecondPipelineLimit is DEPRECATED and unused in radix v4. // See comments of RedisPipelineLimit for details. 
RedisPerSecondPipelineLimit int `envconfig:"REDIS_PERSECOND_PIPELINE_LIMIT" default:"0"` // Enable healthcheck to check Redis Connection. If there is no active connection, healthcheck failed. RedisHealthCheckActiveConnection bool `envconfig:"REDIS_HEALTH_CHECK_ACTIVE_CONNECTION" default:"false"` + // RedisTimeout sets the timeout for Redis connection and I/O operations. + RedisTimeout time.Duration `envconfig:"REDIS_TIMEOUT" default:"10s"` + // RedisPerSecondTimeout sets the timeout for per-second Redis connection and I/O operations. + RedisPerSecondTimeout time.Duration `envconfig:"REDIS_PERSECOND_TIMEOUT" default:"10s"` + + // RedisPoolOnEmptyBehavior controls what happens when Redis connection pool is empty. + // NOTE: In radix v4, the pool ALWAYS blocks when empty (WAIT behavior). + // Possible values: + // - "WAIT": Block until a connection is available (default, radix v4 behavior) + // - "CREATE": NOT SUPPORTED in radix v4 - will cause panic at startup + // - "ERROR": NOT SUPPORTED in radix v4 - will cause panic at startup + // For fail-fast behavior, use context timeouts when calling Redis operations. + RedisPoolOnEmptyBehavior string `envconfig:"REDIS_POOL_ON_EMPTY_BEHAVIOR" default:"WAIT"` + + // RedisPerSecondPoolOnEmptyBehavior controls pool-empty behavior for per-second Redis. + // See RedisPoolOnEmptyBehavior for possible values and details. + RedisPerSecondPoolOnEmptyBehavior string `envconfig:"REDIS_PERSECOND_POOL_ON_EMPTY_BEHAVIOR" default:"WAIT"` + // Memcache settings MemcacheHostPort []string `envconfig:"MEMCACHE_HOST_PORT" default:""` // MemcacheMaxIdleConns sets the maximum number of idle TCP connections per memcached node. @@ -146,13 +197,24 @@ type Settings struct { // number of connections to memcache kept idle in pool, if a connection is needed but none // are idle a new connection is opened, used and closed and can be left in a time-wait state // which can result in high CPU usage. 
- MemcacheMaxIdleConns int `envconfig:"MEMCACHE_MAX_IDLE_CONNS" default:"2"` - MemcacheSrv string `envconfig:"MEMCACHE_SRV" default:""` - MemcacheSrvRefresh time.Duration `envconfig:"MEMCACHE_SRV_REFRESH" default:"0"` + MemcacheMaxIdleConns int `envconfig:"MEMCACHE_MAX_IDLE_CONNS" default:"2"` + MemcacheSrv string `envconfig:"MEMCACHE_SRV" default:""` + MemcacheSrvRefresh time.Duration `envconfig:"MEMCACHE_SRV_REFRESH" default:"0"` + MemcacheTls bool `envconfig:"MEMCACHE_TLS" default:"false"` + MemcacheTlsConfig *tls.Config + MemcacheTlsClientCert string `envconfig:"MEMCACHE_TLS_CLIENT_CERT" default:""` + MemcacheTlsClientKey string `envconfig:"MEMCACHE_TLS_CLIENT_KEY" default:""` + MemcacheTlsCACert string `envconfig:"MEMCACHE_TLS_CACERT" default:""` + MemcacheTlsSkipHostnameVerification bool `envconfig:"MEMCACHE_TLS_SKIP_HOSTNAME_VERIFICATION" default:"false"` // Should the ratelimiting be running in Global shadow-mode, ie. never report a ratelimit status, unless a rate was provided from envoy as an override GlobalShadowMode bool `envconfig:"SHADOW_MODE" default:"false"` + // Should the ratelimiting be running in Global quota-mode, ie. set metadata but never report OVER_LIMIT status when quota limits are exceeded + GlobalQuotaMode bool `envconfig:"QUOTA_MODE" default:"false"` + + ResponseDynamicMetadata bool `envconfig:"RESPONSE_DYNAMIC_METADATA" default:"false"` + // Allow merging of multiple yaml files referencing the same domain MergeDomainConfigurations bool `envconfig:"MERGE_DOMAIN_CONFIG" default:"false"` @@ -177,6 +239,7 @@ func NewSettings() Settings { } // When we require TLS to connect to Redis, we check if we need to connect using the provided key-pair. 
RedisTlsConfig(s.RedisTls || s.RedisPerSecondTls)(&s) + MemcacheTlsConfig(s.MemcacheTls)(&s) GrpcServerTlsConfig()(&s) ConfigGrpcXdsServerTlsConfig()(&s) return s @@ -194,6 +257,15 @@ func RedisTlsConfig(redisTls bool) Option { } } +func MemcacheTlsConfig(memcacheTls bool) Option { + return func(s *Settings) { + s.MemcacheTlsConfig = &tls.Config{} + if memcacheTls { + s.MemcacheTlsConfig = utils.TlsConfigFromFiles(s.MemcacheTlsClientCert, s.MemcacheTlsClientKey, s.MemcacheTlsCACert, utils.ServerCA, s.MemcacheTlsSkipHostnameVerification) + } + } +} + func GrpcServerTlsConfig() Option { return func(s *Settings) { if s.GrpcServerUseTLS { diff --git a/src/settings/settings_test.go b/src/settings/settings_test.go index 0a391b78d..b2c7bc7eb 100644 --- a/src/settings/settings_test.go +++ b/src/settings/settings_test.go @@ -1,6 +1,7 @@ package settings import ( + "os" "testing" "github.com/stretchr/testify/assert" @@ -11,3 +12,84 @@ func TestSettingsTlsConfigUnmodified(t *testing.T) { assert.NotNil(t, settings.RedisTlsConfig) assert.Nil(t, settings.RedisTlsConfig.RootCAs) } + +// Tests for RedisPoolOnEmptyBehavior +func TestRedisPoolOnEmptyBehavior_Default(t *testing.T) { + os.Unsetenv("REDIS_POOL_ON_EMPTY_BEHAVIOR") + + settings := NewSettings() + + assert.Equal(t, "WAIT", settings.RedisPoolOnEmptyBehavior) +} + +func TestRedisPoolOnEmptyBehavior_Error(t *testing.T) { + os.Setenv("REDIS_POOL_ON_EMPTY_BEHAVIOR", "ERROR") + defer os.Unsetenv("REDIS_POOL_ON_EMPTY_BEHAVIOR") + + settings := NewSettings() + + assert.Equal(t, "ERROR", settings.RedisPoolOnEmptyBehavior) +} + +func TestRedisPoolOnEmptyBehavior_Create(t *testing.T) { + os.Setenv("REDIS_POOL_ON_EMPTY_BEHAVIOR", "CREATE") + defer os.Unsetenv("REDIS_POOL_ON_EMPTY_BEHAVIOR") + + settings := NewSettings() + + assert.Equal(t, "CREATE", settings.RedisPoolOnEmptyBehavior) +} + +func TestRedisPoolOnEmptyBehavior_Wait(t *testing.T) { + os.Setenv("REDIS_POOL_ON_EMPTY_BEHAVIOR", "WAIT") + defer 
os.Unsetenv("REDIS_POOL_ON_EMPTY_BEHAVIOR") + + settings := NewSettings() + + assert.Equal(t, "WAIT", settings.RedisPoolOnEmptyBehavior) +} + +func TestRedisPoolOnEmptyBehavior_CaseInsensitive(t *testing.T) { + // Test that lowercase values work (processing is done in driver_impl.go) + os.Setenv("REDIS_POOL_ON_EMPTY_BEHAVIOR", "error") + defer os.Unsetenv("REDIS_POOL_ON_EMPTY_BEHAVIOR") + + settings := NewSettings() + + // Setting stores as-is, case conversion happens in driver_impl.go + assert.Equal(t, "error", settings.RedisPoolOnEmptyBehavior) +} + +// Tests for RedisPerSecondPoolOnEmptyBehavior +func TestRedisPerSecondPoolOnEmptyBehavior_Default(t *testing.T) { + os.Unsetenv("REDIS_PERSECOND_POOL_ON_EMPTY_BEHAVIOR") + + settings := NewSettings() + + assert.Equal(t, "WAIT", settings.RedisPerSecondPoolOnEmptyBehavior) +} + +func TestRedisPerSecondPoolOnEmptyBehavior_Error(t *testing.T) { + os.Setenv("REDIS_PERSECOND_POOL_ON_EMPTY_BEHAVIOR", "ERROR") + defer os.Unsetenv("REDIS_PERSECOND_POOL_ON_EMPTY_BEHAVIOR") + + settings := NewSettings() + + assert.Equal(t, "ERROR", settings.RedisPerSecondPoolOnEmptyBehavior) +} + +// Test both pools can be configured independently +func TestRedisPoolOnEmptyBehavior_IndependentConfiguration(t *testing.T) { + os.Setenv("REDIS_POOL_ON_EMPTY_BEHAVIOR", "ERROR") + os.Setenv("REDIS_PERSECOND_POOL_ON_EMPTY_BEHAVIOR", "CREATE") + defer os.Unsetenv("REDIS_POOL_ON_EMPTY_BEHAVIOR") + defer os.Unsetenv("REDIS_PERSECOND_POOL_ON_EMPTY_BEHAVIOR") + + settings := NewSettings() + + // Main pool configured for fail-fast + assert.Equal(t, "ERROR", settings.RedisPoolOnEmptyBehavior) + + // Per-second pool configured differently + assert.Equal(t, "CREATE", settings.RedisPerSecondPoolOnEmptyBehavior) +} diff --git a/src/srv/srv.go b/src/srv/srv.go index e591c1472..41b033fff 100644 --- a/src/srv/srv.go +++ b/src/srv/srv.go @@ -24,7 +24,7 @@ func ParseSrv(srv string) (string, string, string, error) { matches := srvRegex.FindStringSubmatch(srv) if 
matches == nil { errorText := fmt.Sprintf("could not parse %s to SRV parts", srv) - logger.Errorf(errorText) + logger.Error(errorText) return "", "", "", errors.New(errorText) } return matches[1], matches[2], matches[3], nil diff --git a/src/srv/srv_test.go b/src/srv/srv_test.go index 64a36682f..3e4d03255 100644 --- a/src/srv/srv_test.go +++ b/src/srv/srv_test.go @@ -8,7 +8,11 @@ import ( ) func mockAddrsLookup(service, proto, name string) (cname string, addrs []*net.SRV, err error) { - return "ignored", []*net.SRV{{"z", 1, 0, 0}, {"z", 0, 0, 0}, {"a", 9001, 0, 0}}, nil + return "ignored", []*net.SRV{ + {Target: "z", Port: 1, Priority: 0, Weight: 0}, + {Target: "z", Port: 0, Priority: 0, Weight: 0}, + {Target: "a", Port: 9001, Priority: 0, Weight: 0}, + }, nil } func TestLookupServerStringsFromSrvReturnsServersSorted(t *testing.T) { diff --git a/src/stats/manager.go b/src/stats/manager.go index ba83fd51d..43d72d59b 100644 --- a/src/stats/manager.go +++ b/src/stats/manager.go @@ -9,6 +9,9 @@ type Manager interface { // NewStats provides a RateLimitStats structure associated with a given descriptorKey. // Multiple calls with the same descriptorKey argument are guaranteed to be equivalent. NewStats(descriptorKey string) RateLimitStats + // Gets stats for a domain (when no descriptors are found) + // Multiple calls with the same domain argument are guaranteed to be equivalent. + NewDomainStats(domain string) DomainStats // Initializes a ShouldRateLimitStats structure. // Multiple calls to this method are idempotent. 
NewShouldRateLimitStats() ShouldRateLimitStats @@ -52,3 +55,9 @@ type RateLimitStats struct { WithinLimit gostats.Counter ShadowMode gostats.Counter } + +// Stats for a domain entry +type DomainStats struct { + Key string + NotFound gostats.Counter +} diff --git a/src/stats/manager_impl.go b/src/stats/manager_impl.go index efe8aa07b..ee0cf0135 100644 --- a/src/stats/manager_impl.go +++ b/src/stats/manager_impl.go @@ -39,6 +39,13 @@ func (this *ManagerImpl) NewStats(key string) RateLimitStats { return ret } +func (this *ManagerImpl) NewDomainStats(domain string) DomainStats { + ret := DomainStats{} + domain = utils.SanitizeStatName(domain) + ret.NotFound = this.rlStatsScope.NewCounter(domain + ".domain_not_found") + return ret +} + func (this *ManagerImpl) NewShouldRateLimitStats() ShouldRateLimitStats { ret := ShouldRateLimitStats{} ret.RedisError = this.shouldRateLimitScope.NewCounter("redis_error") diff --git a/src/stats/prom/default_mapper.yaml b/src/stats/prom/default_mapper.yaml new file mode 100644 index 000000000..6229ce152 --- /dev/null +++ b/src/stats/prom/default_mapper.yaml @@ -0,0 +1,97 @@ +# Requires statsd exporter >= v0.6.0 since it uses the "drop" action. 
+mappings: + - match: "ratelimit.service.rate_limit.*.*.near_limit" + name: "ratelimit_service_rate_limit_near_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.over_limit" + name: "ratelimit_service_rate_limit_over_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.total_hits" + name: "ratelimit_service_rate_limit_total_hits" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.within_limit" + name: "ratelimit_service_rate_limit_within_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + - match: "ratelimit.service.rate_limit.*.*.shadow_mode" + name: "ratelimit_service_rate_limit_shadow_mode" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + + - match: "ratelimit\\.service\\.rate_limit\\.([^\\.]*)\\.([^\\.]*)\\.([^\\.]*)(\\..*)?\\.near_limit" + match_type: regex + name: "ratelimit_service_rate_limit_near_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit\\.service\\.rate_limit\\.([^\\.]*)\\.([^\\.]*)\\.([^\\.]*)(\\..*)?\\.over_limit" + match_type: regex + name: "ratelimit_service_rate_limit_over_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit\\.service\\.rate_limit\\.([^\\.]*)\\.([^\\.]*)\\.([^\\.]*)(\\..*)?\\.total_hits" + match_type: regex + name: "ratelimit_service_rate_limit_total_hits" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit\\.service\\.rate_limit\\.([^\\.]*)\\.([^\\.]*)\\.([^\\.]*)(\\..*)?\\.within_limit" + match_type: regex + name: "ratelimit_service_rate_limit_within_limit" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + - match: "ratelimit\\.service\\.rate_limit\\.([^\\.]*)\\.([^\\.]*)\\.([^\\.]*)(\\..*)?\\.shadow_mode" + match_type: 
regex + name: "ratelimit_service_rate_limit_shadow_mode" + timer_type: "histogram" + labels: + domain: "$1" + key1: "$2" + key2: "$3" + + - match: "ratelimit.service.call.should_rate_limit.*" + name: "ratelimit_service_should_rate_limit_error" + match_metric_type: counter + labels: + err_type: "$1" + + - match: "ratelimit_server.*.total_requests" + name: "ratelimit_service_total_requests" + match_metric_type: counter + labels: + grpc_method: "$1" + - match: "ratelimit_server.*.response_time" + name: "ratelimit_service_response_time_seconds" + timer_type: histogram + labels: + grpc_method: "$1" + + - match: "ratelimit.service.config_load_success" + name: "ratelimit_service_config_load_success" + match_metric_type: counter + - match: "ratelimit.service.config_load_error" + name: "ratelimit_service_config_load_error" + match_metric_type: counter diff --git a/src/stats/prom/prometheus_sink.go b/src/stats/prom/prometheus_sink.go new file mode 100644 index 000000000..2c93c49c8 --- /dev/null +++ b/src/stats/prom/prometheus_sink.go @@ -0,0 +1,163 @@ +package prom + +import ( + _ "embed" + "net/http" + + "github.com/go-kit/log" + gostats "github.com/lyft/gostats" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/statsd_exporter/pkg/event" + "github.com/prometheus/statsd_exporter/pkg/exporter" + "github.com/prometheus/statsd_exporter/pkg/mapper" + "github.com/sirupsen/logrus" +) + +var ( + //go:embed default_mapper.yaml + defaultMapper string + _ gostats.Sink = &prometheusSink{} + + eventsActions = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_actions_total", + Help: "The total number of StatsD events by action.", + }, + []string{"action"}, + ) + eventsUnmapped = promauto.NewCounter( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_unmapped_total", + Help: "The total number of StatsD 
events no mapping was found for.", + }) + metricsCount = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "statsd_exporter_metrics_total", + Help: "The total number of metrics.", + }, + []string{"type"}, + ) + conflictingEventStats = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_conflict_total", + Help: "The total number of StatsD events with conflicting names.", + }, + []string{"type"}, + ) + eventStats = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_total", + Help: "The total number of StatsD events seen.", + }, + []string{"type"}, + ) + errorEventStats = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "statsd_exporter_events_error_total", + Help: "The total number of StatsD events discarded due to errors.", + }, + []string{"reason"}, + ) +) + +type prometheusSink struct { + config struct { + addr string + path string + mapperYamlPath string + } + mapper *mapper.MetricMapper + events chan event.Events + exp *exporter.Exporter +} + +type prometheusSinkOption func(sink *prometheusSink) + +func WithAddr(addr string) prometheusSinkOption { + return func(sink *prometheusSink) { + sink.config.addr = addr + } +} + +func WithPath(path string) prometheusSinkOption { + return func(sink *prometheusSink) { + sink.config.path = path + } +} + +func WithMapperYamlPath(mapperYamlPath string) prometheusSinkOption { + return func(sink *prometheusSink) { + sink.config.mapperYamlPath = mapperYamlPath + } +} + +// NewPrometheusSink returns a Sink that exposes the collected stats over HTTP in Prometheus format.
+func NewPrometheusSink(opts ...prometheusSinkOption) gostats.Sink { + promRegistry := prometheus.DefaultRegisterer + sink := &prometheusSink{ + events: make(chan event.Events), + mapper: &mapper.MetricMapper{ + Registerer: promRegistry, + }, + } + for _, opt := range opts { + opt(sink) + } + if sink.config.addr == "" { + sink.config.addr = ":9090" + } + if sink.config.path == "" { + sink.config.path = "/metrics" + } + http.Handle(sink.config.path, promhttp.Handler()) + go func() { + logrus.Infof("Starting prometheus sink on %s%s", sink.config.addr, sink.config.path) + _ = http.ListenAndServe(sink.config.addr, nil) + }() + if sink.config.mapperYamlPath != "" { + _ = sink.mapper.InitFromFile(sink.config.mapperYamlPath) + } else { + _ = sink.mapper.InitFromYAMLString(defaultMapper) + } + + sink.exp = exporter.NewExporter(promRegistry, + sink.mapper, log.NewNopLogger(), + eventsActions, eventsUnmapped, + errorEventStats, eventStats, + conflictingEventStats, metricsCount) + + go func() { + sink.exp.Listen(sink.events) + }() + + return sink +} + +func (s *prometheusSink) FlushCounter(name string, value uint64) { + logrus.Debugf("FlushCounter: %s %d", name, value) + s.events <- event.Events{&event.CounterEvent{ + CMetricName: name, + CValue: float64(value), + CLabels: make(map[string]string), + }} +} + +func (s *prometheusSink) FlushGauge(name string, value uint64) { + logrus.Debugf("FlushGauge: %s %d", name, value) + s.events <- event.Events{&event.GaugeEvent{ + GMetricName: name, + GValue: float64(value), + GLabels: make(map[string]string), + }} +} + +func (s *prometheusSink) FlushTimer(name string, value float64) { + logrus.Debugf("FlushTimer: %s %v", name, value) + s.events <- event.Events{&event.ObserverEvent{ + OMetricName: name, + OValue: value, + OLabels: make(map[string]string), + }} +} diff --git a/src/stats/prom/prometheus_sink_test.go b/src/stats/prom/prometheus_sink_test.go new file mode 100644 index 000000000..23817b02c --- /dev/null +++ 
b/src/stats/prom/prometheus_sink_test.go @@ -0,0 +1,119 @@ +package prom + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var s = NewPrometheusSink() + +func TestFlushCounter(t *testing.T) { + s.FlushCounter("ratelimit_server.ShouldRateLimit.total_requests", 1) + assert.Eventually(t, func() bool { + metricFamilies, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + metrics := make(map[string]*dto.MetricFamily) + for _, metricFamily := range metricFamilies { + metrics[*metricFamily.Name] = metricFamily + } + + m, ok := metrics["ratelimit_service_total_requests"] + require.True(t, ok) + require.Len(t, m.Metric, 1) + require.Equal(t, map[string]string{ + "grpc_method": "ShouldRateLimit", + }, toMap(m.Metric[0].Label)) + require.Equal(t, 1.0, *m.Metric[0].Counter.Value) + return true + }, time.Second, time.Millisecond) +} + +func toMap(labels []*dto.LabelPair) map[string]string { + m := make(map[string]string) + for _, l := range labels { + m[*l.Name] = *l.Value + } + return m +} + +func TestFlushCounterWithDifferentLabels(t *testing.T) { + s.FlushCounter("ratelimit.service.rate_limit.domain1.key1_val1.over_limit", 1) + s.FlushCounter("ratelimit.service.rate_limit.domain1.key1_val1.key2_val2.over_limit", 2) + s.FlushCounter("ratelimit.service.rate_limit.domain1.key3_val3.key4_val4.over_limit", 1) + s.FlushCounter("ratelimit.service.rate_limit.domain1.key3_val3.key4_val4.key5_val5.over_limit", 2) + assert.Eventually(t, func() bool { + metricFamilies, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + metrics := make(map[string]*dto.MetricFamily) + for _, metricFamily := range metricFamilies { + metrics[*metricFamily.Name] = metricFamily + } + + m, ok := metrics["ratelimit_service_rate_limit_over_limit"] + require.True(t, ok) + require.Len(t, m.Metric, 3) + 
require.Equal(t, 1.0, *m.Metric[0].Counter.Value) + require.Equal(t, map[string]string{ + "domain": "domain1", + "key1": "key1_val1", + }, toMap(m.Metric[0].Label)) + require.Equal(t, 2.0, *m.Metric[1].Counter.Value) + require.Equal(t, map[string]string{ + "domain": "domain1", + "key1": "key1_val1", + "key2": "key2_val2", + }, toMap(m.Metric[1].Label)) + require.Equal(t, 3.0, *m.Metric[2].Counter.Value) + require.Equal(t, map[string]string{ + "domain": "domain1", + "key1": "key3_val3", + "key2": "key4_val4", + }, toMap(m.Metric[2].Label)) + return true + }, time.Second, time.Millisecond) +} + +func TestFlushGauge(t *testing.T) { + s.FlushGauge("ratelimit.service.rate_limit.domain1.key1.test_gauge", 1) + metricFamilies, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + metrics := make(map[string]*dto.MetricFamily) + for _, metricFamily := range metricFamilies { + metrics[*metricFamily.Name] = metricFamily + } + + _, ok := metrics["ratelimit_service_rate_limit_test_gauge"] + require.False(t, ok) +} + +func TestFlushTimer(t *testing.T) { + s.FlushTimer("ratelimit.service.rate_limit.mongo_cps.database_users.total_hits", 1) + assert.Eventually(t, func() bool { + metricFamilies, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + metrics := make(map[string]*dto.MetricFamily) + for _, metricFamily := range metricFamilies { + metrics[*metricFamily.Name] = metricFamily + } + + m, ok := metrics["ratelimit_service_rate_limit_total_hits"] + require.True(t, ok) + require.Len(t, m.Metric, 1) + require.Equal(t, uint64(1), *m.Metric[0].Histogram.SampleCount) + require.Equal(t, map[string]string{ + "domain": "mongo_cps", + "key1": "database_users", + }, toMap(m.Metric[0].Label)) + require.Equal(t, 1.0, *m.Metric[0].Histogram.SampleSum) + return true + }, time.Second, time.Millisecond) +} diff --git a/src/utils/multi_closer.go b/src/utils/multi_closer.go new file mode 100644 index 000000000..fead3f6b1 --- /dev/null +++ 
b/src/utils/multi_closer.go @@ -0,0 +1,18 @@ +package utils + +import ( + "errors" + "io" +) + +type MultiCloser struct { + Closers []io.Closer +} + +func (m *MultiCloser) Close() error { + var e error + for _, closer := range m.Closers { + e = errors.Join(e, closer.Close()) + } + return e +} diff --git a/src/utils/utilities.go b/src/utils/utilities.go index 48f7f7ca0..fb398b764 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -1,10 +1,11 @@ package utils import ( + "regexp" "strings" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/golang/protobuf/ptypes/duration" + "google.golang.org/protobuf/types/known/durationpb" ) // Interface for a time source. @@ -26,22 +27,21 @@ func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { return 60 * 60 case pb.RateLimitResponse_RateLimit_DAY: return 60 * 60 * 24 + case pb.RateLimitResponse_RateLimit_WEEK: + return 60 * 60 * 24 * 7 + case pb.RateLimitResponse_RateLimit_MONTH: + return 60 * 60 * 24 * 30 + case pb.RateLimitResponse_RateLimit_YEAR: + return 60 * 60 * 24 * 365 } panic("should not get here") } -func CalculateReset(unit *pb.RateLimitResponse_RateLimit_Unit, timeSource TimeSource) *duration.Duration { +func CalculateReset(unit *pb.RateLimitResponse_RateLimit_Unit, timeSource TimeSource) *durationpb.Duration { sec := UnitToDivider(*unit) now := timeSource.UnixNow() - return &duration.Duration{Seconds: sec - now%sec} -} - -func Max(a uint32, b uint32) uint32 { - if a > b { - return a - } - return b + return &durationpb.Duration{Seconds: sec - now%sec} } // Mask credentials from a redis connection string like @@ -62,8 +62,29 @@ func MaskCredentialsInUrl(url string) string { return strings.Join(urls, ",") } +var ipv4Regex = regexp.MustCompile(`\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}`) + // Remove invalid characters from the stat name.
func SanitizeStatName(s string) string { r := strings.NewReplacer(":", "_", "|", "_") - return r.Replace(s) + return ipv4Regex.ReplaceAllStringFunc(r.Replace(s), func(ip string) string { + return strings.ReplaceAll(ip, ".", "_") + }) +} + +func GetHitsAddends(request *pb.RateLimitRequest) []uint64 { + hitsAddends := make([]uint64, len(request.Descriptors)) + + for i, descriptor := range request.Descriptors { + if descriptor.HitsAddend != nil { + // If the per descriptor hits_addend is set, use that. It is allowed to be zero: a zero value + // means check-only, i.e. the hits are not incremented. + hitsAddends[i] = descriptor.HitsAddend.Value + } else { + // If the per descriptor hits_addend is not set, use the request's hits_addend. If the value is + // zero (default value if not specified by the caller), use 1 for backward compatibility. + hitsAddends[i] = uint64(max(1, uint64(request.HitsAddend))) + } + } + return hitsAddends } diff --git a/test/common/common.go b/test/common/common.go index 1062a8995..6a414ca09 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -14,38 +14,47 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/wrapperspb" pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" ) type TestStatSink struct { - sync.Mutex + mu sync.Mutex Record map[string]interface{} } +func NewTestStatSink() *TestStatSink { + return &TestStatSink{ + mu: sync.Mutex{}, + Record: make(map[string]interface{}), + } +} + func (s *TestStatSink) Clear() { - s.Lock() - s.Record = map[string]interface{}{} - s.Unlock() + s.mu.Lock() + s.Record = make(map[string]interface{}) + s.mu.Unlock() } func (s *TestStatSink) FlushCounter(name string, value uint64) { - s.Lock() + s.mu.Lock() s.Record[name] = value - s.Unlock() + s.mu.Unlock() } func (s *TestStatSink) FlushGauge(name string, value uint64) { - 
s.Lock() + s.mu.Lock() + fmt.Println("FlushGauge", name, value) s.Record[name] = value - s.Unlock() + s.mu.Unlock() } func (s *TestStatSink) FlushTimer(name string, value float64) { - s.Lock() + s.mu.Lock() s.Record[name] = value - s.Unlock() + s.mu.Unlock() } func NewRateLimitRequest(domain string, descriptors [][][2]string, hitsAddend uint32) *pb.RateLimitRequest { @@ -64,6 +73,16 @@ func NewRateLimitRequest(domain string, descriptors [][][2]string, hitsAddend ui return request } +func NewRateLimitRequestWithPerDescriptorHitsAddend(domain string, descriptors [][][2]string, + hitsAddends []uint64, +) *pb.RateLimitRequest { + request := NewRateLimitRequest(domain, descriptors, 1) + for i, hitsAddend := range hitsAddends { + request.Descriptors[i].HitsAddend = &wrapperspb.UInt64Value{Value: hitsAddend} + } + return request +} + func AssertProtoEqual(assert *assert.Assertions, expected proto.Message, actual proto.Message) { assert.True(proto.Equal(expected, actual), fmt.Sprintf("These two protobuf messages are not equal:\nexpected: %v\nactual: %v", expected, actual)) @@ -86,7 +105,7 @@ func WaitForTcpPort(ctx context.Context, port int, timeout time.Duration) error // Wait up to 1s for the redis instance to start accepting connections. for { var d net.Dialer - conn, err := d.DialContext(ctx, "tcp", "localhost:"+strconv.Itoa(port)) + conn, err := d.DialContext(timeoutCtx, "tcp", "localhost:"+strconv.Itoa(port)) if err == nil { conn.Close() // TCP connections are working. All is well. 
diff --git a/test/config/basic_config.yaml b/test/config/basic_config.yaml index 1ce7c9afa..8992966fc 100644 --- a/test/config/basic_config.yaml +++ b/test/config/basic_config.yaml @@ -42,6 +42,7 @@ descriptors: - key: key4 rate_limit: + name: key4_rate_limit unit: day requests_per_unit: 1 diff --git a/test/config/config_test.go b/test/config/config_test.go index 3ed3984ba..c32f84155 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -12,6 +12,7 @@ import ( pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3" stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/envoyproxy/ratelimit/src/config" mockstats "github.com/envoyproxy/ratelimit/test/mocks/stats" @@ -32,8 +33,11 @@ func TestBasicConfig(t *testing.T) { rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), mockstats.NewMockStatManager(stats), false) rlConfig.Dump() assert.Equal(rlConfig.IsEmptyDomains(), false) + assert.EqualValues(0, stats.NewCounter("foo_domain.domain_not_found").Value()) assert.Nil(rlConfig.GetLimit(context.TODO(), "foo_domain", &pb_struct.RateLimitDescriptor{})) + assert.EqualValues(1, stats.NewCounter("foo_domain.domain_not_found").Value()) assert.Nil(rlConfig.GetLimit(context.TODO(), "test-domain", &pb_struct.RateLimitDescriptor{})) + assert.EqualValues(0, stats.NewCounter("test-domain.domain_not_found").Value()) rl := rlConfig.GetLimit( context.TODO(), "test-domain", @@ -148,6 +152,7 @@ func TestBasicConfig(t *testing.T) { rl.Stats.NearLimit.Inc() rl.Stats.WithinLimit.Inc() assert.EqualValues(1, rl.Limit.RequestsPerUnit) + assert.Empty(rl.Limit.Name, "No name provided in config") assert.Equal(pb.RateLimitResponse_RateLimit_HOUR, rl.Limit.Unit) assert.EqualValues(1, stats.NewCounter("test-domain.key3.total_hits").Value()) assert.EqualValues(1, stats.NewCounter("test-domain.key3.over_limit").Value()) @@ -164,6 +169,7 @@ func TestBasicConfig(t *testing.T) { 
rl.Stats.NearLimit.Inc() rl.Stats.WithinLimit.Inc() assert.EqualValues(1, rl.Limit.RequestsPerUnit) + assert.EqualValues("key4_rate_limit", rl.Limit.Name, "Name provided in config") assert.Equal(pb.RateLimitResponse_RateLimit_DAY, rl.Limit.Unit) assert.EqualValues(1, stats.NewCounter("test-domain.key4.total_hits").Value()) assert.EqualValues(1, stats.NewCounter("test-domain.key4.over_limit").Value()) @@ -586,6 +592,10 @@ func TestWildcardConfig(t *testing.T) { }) assert.NotNil(withoutVal1) assert.Equal(withoutVal1, withoutVal2) + // Verify stats keys for no value descriptors + assert.Equal("test-domain.noVal", withoutVal1.Stats.Key, "No value descriptor should use just the key") + assert.Equal(withoutVal1.Stats.Key, withoutVal1.FullKey, "FullKey should match Stats.Key") + assert.Equal(withoutVal1.Stats.Key, withoutVal2.Stats.Key, "All no-value matches should have same stats key") // Matches multiple wildcard values and results are equal wildcard1 := rlConfig.GetLimit( @@ -605,7 +615,12 @@ func TestWildcardConfig(t *testing.T) { }) assert.NotNil(wildcard1) assert.Equal(wildcard1, wildcard2) + assert.Equal("test-domain.wild_foo*", wildcard1.Stats.Key, "Wildcard stats key should include the wildcard pattern with *") + assert.Equal("test-domain.wild_foo*", wildcard1.FullKey, "Wildcard FullKey should match Stats.Key") + assert.Equal("test-domain.wild_foo*", wildcard2.Stats.Key, "All wildcard matches should have same stats key with wildcard pattern") assert.NotNil(wildcard3) + assert.Equal("test-domain.nestedWild_val1.wild_goo*", wildcard3.Stats.Key, "Nested wildcard stats key should include the wildcard pattern with *") + assert.Equal("test-domain.nestedWild_val1.wild_goo*", wildcard3.FullKey, "Nested wildcard FullKey should match Stats.Key") // Doesn't match non-matching values noMatch := rlConfig.GetLimit( @@ -631,3 +646,1336 @@ func TestWildcardConfig(t *testing.T) { }) assert.Nil(midWildcard) } + +func TestDetailedMetric(t *testing.T) { + assert := require.New(t) + 
stats := stats.NewStore(stats.NewNullSink(), false) + + // Descriptor config with a realistic nested setup, that is re-used across + // multiple test cases. + realisticExampleConfig := &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key_1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 500, + Unit: "minute", + }, + DetailedMetric: true, + }, + { + Key: "key_1", + Value: "some-value-for-key-1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 500, + Unit: "minute", + }, + }, + { + Key: "key_2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5000, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key_3", + DetailedMetric: true, + }, + { + Key: "key_3", + Value: "requested-key3-value", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + DetailedMetric: true, + }, + }, + DetailedMetric: true, + }, + { + Key: "key_2", + Value: "specific-id", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 50000, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key_3", + DetailedMetric: true, + }, + { + Key: "key_3", + Value: "requested-key3-value", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + DetailedMetric: true, + }, + }, + DetailedMetric: true, + }, + }, + } + + tests := []struct { + name string + config []config.RateLimitConfigToLoad + request *pb_struct.RateLimitDescriptor + expectedStatsKey string + }{ + { + name: "nested with no values but request only top-level key", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key-1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key-2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + }, + }, + DetailedMetric: true, + }, + }, + 
}, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key-1", + Value: "value-1", + }, + }, + }, + expectedStatsKey: "nested.key-1_value-1", + }, + { + name: "nested with no values but request only top-level key with no detailed metric", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key-1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key-2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + }, + }, + DetailedMetric: false, + }, + }, + }, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key-1", + Value: "value-1", + }, + }, + }, + expectedStatsKey: "nested.key-1", + }, + { + name: "nested with no values and request fully nested descriptors", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key-1", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key-2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + DetailedMetric: true, + }, + }, + DetailedMetric: true, + }, + }, + }, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key-1", + Value: "value-1", + }, + { + Key: "key-2", + Value: "value-2", + }, + }, + }, + expectedStatsKey: "nested.key-1_value-1.key-2_value-2", + }, + { + name: "nested with no values and request fully nested descriptors with no detailed metric", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: &config.YamlRoot{ + Domain: "nested", + Descriptors: []config.YamlDescriptor{ + { + Key: "key-1", + RateLimit: 
&config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + Descriptors: []config.YamlDescriptor{ + { + Key: "key-2", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 5, + Unit: "minute", + }, + DetailedMetric: false, + }, + }, + DetailedMetric: false, + }, + }, + }, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key-1", + Value: "value-1", + }, + { + Key: "key-2", + Value: "value-2", + }, + }, + }, + expectedStatsKey: "nested.key-1.key-2", + }, + { + name: "test nested descriptors with simple request", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: realisticExampleConfig, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key_1", + Value: "value-for-key-1", + }, + }, + }, + expectedStatsKey: "nested.key_1_value-for-key-1", + }, + { + name: "test nested only second descriptor request not nested", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: realisticExampleConfig, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key_2", + Value: "key-2-value", + }, + }, + }, + expectedStatsKey: "nested.key_2_key-2-value", + }, + { + name: "test nested descriptors with nested request", + config: []config.RateLimitConfigToLoad{ + { + ConfigYaml: realisticExampleConfig, + }, + }, + request: &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + { + Key: "key_2", + Value: "key-2-value", + }, + { + Key: "key_3", + Value: "requested-key3-value", + }, + }, + }, + expectedStatsKey: "nested.key_2_key-2-value.key_3_requested-key3-value", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rlConfig := config.NewRateLimitConfigImpl(tt.config, mockstats.NewMockStatManager(stats), true) + rlConfig.Dump() + + rl := rlConfig.GetLimit( + context.TODO(), "nested", + tt.request, + ) + 
assert.NotNil(rl) + assert.Equal(tt.expectedStatsKey, rl.Stats.Key) + }) + } +} + +func TestValueToMetric_UsesRuntimeValuesInStats(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "route", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "http_method", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "subject_id", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 60, + Unit: "minute", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + + rl := rlConfig.GetLimit( + context.TODO(), "domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: "draw"}, + {Key: "http_method", Value: "GET"}, + {Key: "subject_id", Value: "123"}, + }, + }, + ) + asrt.NotNil(rl) + + // Should include actual runtime values for keys that set value_to_metric: true + expectedKey := "domain.route_draw.http_method_GET.subject_id" + asrt.Equal(expectedKey, rl.Stats.Key) + + // Increment a couple of counters to ensure the key is actually used in stats + rl.Stats.TotalHits.Inc() + rl.Stats.WithinLimit.Inc() + + asrt.EqualValues(1, store.NewCounter(expectedKey+".total_hits").Value()) + asrt.EqualValues(1, store.NewCounter(expectedKey+".within_limit").Value()) +} + +func TestValueToMetric_DefaultKeyIncludesValueAtThatLevel(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "d", + Descriptors: []config.YamlDescriptor{ + { + Key: "k1", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "k2", + RateLimit: 
&config.YamlRateLimit{ + RequestsPerUnit: 1, + Unit: "second", + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + rl := rlConfig.GetLimit( + context.TODO(), "d", + &pb_struct.RateLimitDescriptor{Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "k1", Value: "A"}, + {Key: "k2", Value: "foo"}, + }}, + ) + asrt.NotNil(rl) + asrt.Equal("d.k1_A.k2", rl.Stats.Key) +} + +func TestValueToMetric_MidLevelOnly(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "d", + Descriptors: []config.YamlDescriptor{ + { + Key: "k1", + Descriptors: []config.YamlDescriptor{ + { + Key: "k2", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "k3", + RateLimit: &config.YamlRateLimit{RequestsPerUnit: 1, Unit: "second"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + rl := rlConfig.GetLimit( + context.TODO(), "d", + &pb_struct.RateLimitDescriptor{Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "k1", Value: "X"}, + {Key: "k2", Value: "Y"}, + {Key: "k3", Value: "Z"}, + }}, + ) + asrt.NotNil(rl) + // k1 has no flag -> just key; k2 has flag -> include value + asrt.Equal("d.k1.k2_Y.k3", rl.Stats.Key) +} + +func TestValueToMetric_NoFlag_Unchanged(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "d", + Descriptors: []config.YamlDescriptor{ + { + Key: "k1", + Descriptors: []config.YamlDescriptor{ + { + Key: "k2", + RateLimit: &config.YamlRateLimit{RequestsPerUnit: 1, Unit: "second"}, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, 
mockstats.NewMockStatManager(store), false) + rl := rlConfig.GetLimit( + context.TODO(), "d", + &pb_struct.RateLimitDescriptor{Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "k1", Value: "X"}, + {Key: "k2", Value: "Y"}, + }}, + ) + asrt.NotNil(rl) + // No flags anywhere -> same as old behavior when default matched at k1 + asrt.Equal("d.k1.k2", rl.Stats.Key) +} + +func TestValueToMetric_DoesNotOverrideDetailedMetric(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "route", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "http_method", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "subject_id", + DetailedMetric: true, + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 60, + Unit: "minute", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + + rl := rlConfig.GetLimit( + context.TODO(), "domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: "draw"}, + {Key: "http_method", Value: "GET"}, + {Key: "subject_id", Value: "123"}, + }, + }, + ) + asrt.NotNil(rl) + + // With detailed_metric at the leaf, the detailed metric key should be used, regardless of value_to_metric flags + expectedKey := "domain.route_draw.http_method_GET.subject_id_123" + asrt.Equal(expectedKey, rl.Stats.Key) + + rl.Stats.TotalHits.Inc() + asrt.EqualValues(1, store.NewCounter(expectedKey+".total_hits").Value()) +} + +func TestValueToMetric_WithConfiguredValues(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "test-domain", 
+ Descriptors: []config.YamlDescriptor{ + { + Key: "route", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "http_method", + Value: "GET", // Configured value in descriptor + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "subject_id", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 60, + Unit: "minute", + }, + }, + }, + }, + { + Key: "http_method", + Value: "POST", // Another configured value + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "subject_id", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 30, + Unit: "minute", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + + // Test GET path - should include runtime value for route, but use configured value for http_method + rl := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: "api"}, + {Key: "http_method", Value: "GET"}, + {Key: "subject_id", Value: "user123"}, + }, + }, + ) + asrt.NotNil(rl) + asrt.EqualValues(60, rl.Limit.RequestsPerUnit) + // route has value_to_metric=true, so includes runtime value; http_method has configured value, so uses that + expectedKey := "test-domain.route_api.http_method_GET.subject_id" + asrt.Equal(expectedKey, rl.Stats.Key) + + // Test POST path - should include runtime value for route, but use configured value for http_method + rl = rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: "api"}, + {Key: "http_method", Value: "POST"}, + {Key: "subject_id", Value: "user456"}, + }, + }, + ) + asrt.NotNil(rl) + asrt.EqualValues(30, rl.Limit.RequestsPerUnit) + expectedKey = "test-domain.route_api.http_method_POST.subject_id" + asrt.Equal(expectedKey, rl.Stats.Key) + + // Test that stats 
are actually created with the correct keys + rl.Stats.TotalHits.Inc() + asrt.EqualValues(1, store.NewCounter(expectedKey+".total_hits").Value()) +} + +func TestValueToMetric_WithWildcard(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "user", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "action", + Value: "read*", // Wildcard pattern + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "resource", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 100, + Unit: "minute", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + + // Test wildcard matching with value_to_metric - should include full runtime value + rl := rlConfig.GetLimit( + context.TODO(), "domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "user", Value: "alice"}, + {Key: "action", Value: "readfile"}, // Matches "read*" wildcard + {Key: "resource", Value: "documents"}, + }, + }, + ) + asrt.NotNil(rl) + asrt.EqualValues(100, rl.Limit.RequestsPerUnit) + // Both user and action should include their full runtime values due to value_to_metric + expectedKey := "domain.user_alice.action_readfile.resource" + asrt.Equal(expectedKey, rl.Stats.Key) + + // Test another wildcard match + rl = rlConfig.GetLimit( + context.TODO(), "domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "user", Value: "bob"}, + {Key: "action", Value: "readdata"}, // Also matches "read*" wildcard + {Key: "resource", Value: "database"}, + }, + }, + ) + asrt.NotNil(rl) + expectedKey = "domain.user_bob.action_readdata.resource" + asrt.Equal(expectedKey, rl.Stats.Key) + + // Test that stats 
are actually created with the correct keys + rl.Stats.TotalHits.Inc() + asrt.EqualValues(1, store.NewCounter(expectedKey+".total_hits").Value()) +} + +func TestValueToMetric_WithEmptyValue(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "route", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "http_method", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "subject_id", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 60, + Unit: "minute", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + + // Test with empty value for route - should not include underscore and empty value + rl := rlConfig.GetLimit( + context.TODO(), "domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: ""}, // Empty value + {Key: "http_method", Value: "GET"}, + {Key: "subject_id", Value: "123"}, + }, + }, + ) + asrt.NotNil(rl) + + // Should not include underscore and empty value for route + expectedKey := "domain.route.http_method_GET.subject_id" + asrt.Equal(expectedKey, rl.Stats.Key) + + // Test with empty value for http_method - should not include underscore and empty value + rl = rlConfig.GetLimit( + context.TODO(), "domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: "draw"}, + {Key: "http_method", Value: ""}, // Empty value + {Key: "subject_id", Value: "123"}, + }, + }, + ) + asrt.NotNil(rl) + + // Should not include underscore and empty value for http_method + expectedKey = "domain.route_draw.http_method.subject_id" + asrt.Equal(expectedKey, rl.Stats.Key) + + // Test with empty value for both - should 
not include underscores and empty values + rl = rlConfig.GetLimit( + context.TODO(), "domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: ""}, // Empty value + {Key: "http_method", Value: ""}, // Empty value + {Key: "subject_id", Value: "123"}, + }, + }, + ) + asrt.NotNil(rl) + + // Should not include underscores and empty values + expectedKey = "domain.route.http_method.subject_id" + asrt.Equal(expectedKey, rl.Stats.Key) + + // Increment counters to ensure the keys are actually used in stats + rl.Stats.TotalHits.Inc() + rl.Stats.WithinLimit.Inc() + + asrt.EqualValues(1, store.NewCounter(expectedKey+".total_hits").Value()) + asrt.EqualValues(1, store.NewCounter(expectedKey+".within_limit").Value()) +} + +// TestValueToMetric_FullKeyMatchesStatsKey verifies that rateLimit.FullKey always matches +// rateLimit.Stats.Key. This is important for debugging and log/metric correlation. +// FullKey is used in debug logs, while Stats.Key is used for actual metrics. 
+func TestValueToMetric_FullKeyMatchesStatsKey(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "test-domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "route", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "http_method", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "subject_id", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 60, + Unit: "minute", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + + // Test case 1: value_to_metric enabled - FullKey should match Stats.Key + rl := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: "api"}, + {Key: "http_method", Value: "GET"}, + {Key: "subject_id", Value: "user123"}, + }, + }, + ) + asrt.NotNil(rl) + asrt.Equal(rl.FullKey, rl.Stats.Key, "FullKey should match Stats.Key when value_to_metric is enabled") + expectedKey := "test-domain.route_api.http_method_GET.subject_id" + asrt.Equal(expectedKey, rl.FullKey) + asrt.Equal(expectedKey, rl.Stats.Key) + + // Test case 2: value_to_metric disabled - FullKey should match Stats.Key + cfgNoValueToMetric := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "test-domain-2", + Descriptors: []config.YamlDescriptor{ + { + Key: "route", + Descriptors: []config.YamlDescriptor{ + { + Key: "http_method", + Descriptors: []config.YamlDescriptor{ + { + Key: "subject_id", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 60, + Unit: "minute", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig2 := config.NewRateLimitConfigImpl(cfgNoValueToMetric, mockstats.NewMockStatManager(store), false) + rl2 := 
rlConfig2.GetLimit( + context.TODO(), "test-domain-2", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: "api"}, + {Key: "http_method", Value: "GET"}, + {Key: "subject_id", Value: "user123"}, + }, + }, + ) + asrt.NotNil(rl2) + asrt.Equal(rl2.FullKey, rl2.Stats.Key, "FullKey should match Stats.Key even when value_to_metric is disabled") + + // Test case 3: detailed_metric enabled - FullKey should match Stats.Key + cfgDetailedMetric := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "test-domain-3", + Descriptors: []config.YamlDescriptor{ + { + Key: "route", + Descriptors: []config.YamlDescriptor{ + { + Key: "http_method", + Descriptors: []config.YamlDescriptor{ + { + Key: "subject_id", + DetailedMetric: true, + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 60, + Unit: "minute", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig3 := config.NewRateLimitConfigImpl(cfgDetailedMetric, mockstats.NewMockStatManager(store), false) + rl3 := rlConfig3.GetLimit( + context.TODO(), "test-domain-3", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: "api"}, + {Key: "http_method", Value: "GET"}, + {Key: "subject_id", Value: "user123"}, + }, + }, + ) + asrt.NotNil(rl3) + asrt.Equal(rl3.FullKey, rl3.Stats.Key, "FullKey should match Stats.Key when detailed_metric is enabled") +} + +// TestShareThreshold tests config (ShareThresholdKeyPattern) and metrics (Stats.Key) +// Cache key generation is tested in base_limiter_test.go +func TestShareThreshold(t *testing.T) { + asrt := assert.New(t) + stats := stats.NewStore(stats.NewNullSink(), false) + rlConfig := config.NewRateLimitConfigImpl(loadFile("share_threshold.yaml"), mockstats.NewMockStatManager(stats), false) + + // Test Case 1: Basic share_threshold functionality + t.Run("Basic share_threshold", func(t *testing.T) { + testValues := 
[]string{"files/a.pdf", "files/b.csv", "files/c.txt"} + var rateLimits []*config.RateLimit + + for _, value := range testValues { + rl := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "files", Value: value}}, + }) + asrt.NotNil(rl) + // Verify config: ShareThresholdKeyPattern is set correctly + asrt.Equal("files/*", rl.ShareThresholdKeyPattern[0]) + asrt.EqualValues(100, rl.Limit.RequestsPerUnit) + // Verify stats: All should have the same stats key with wildcard pattern + // share_threshold: stats key includes wildcard pattern with * + asrt.Equal("test-domain.files_files/*", rl.Stats.Key) + rateLimits = append(rateLimits, rl) + } + + // Verify all have same stats key + for i := 1; i < len(rateLimits); i++ { + asrt.Equal(rateLimits[0].Stats.Key, rateLimits[i].Stats.Key, "All values should have same stats key") + asrt.Equal(rateLimits[i].Stats.Key, rateLimits[i].FullKey, "FullKey should match Stats.Key for all rate limits") + } + }) + + // Test Case 2: share_threshold with metrics (value_to_metric and detailed_metric) + // Tests that metrics correctly include wildcard prefix when share_threshold is enabled + t.Run("share_threshold with metrics", func(t *testing.T) { + // Test value_to_metric with share_threshold + // share_threshold takes priority: should use wildcard pattern with * + rl1 := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "route", Value: "api/v1"}, + {Key: "method", Value: "GET"}, + }, + }) + asrt.NotNil(rl1) + asrt.Equal("api/*", rl1.ShareThresholdKeyPattern[0]) + asrt.Equal("test-domain.route_api/*.method", rl1.Stats.Key, "share_threshold takes priority over value_to_metric, should use wildcard pattern with *") + asrt.Equal(rl1.Stats.Key, rl1.FullKey, "FullKey should match Stats.Key") + + // Test detailed_metric with share_threshold + // share_threshold 
takes priority: should use wildcard pattern with * + rl2 := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "service", Value: "svc/user"}, + {Key: "endpoint", Value: "get"}, + }, + }) + asrt.NotNil(rl2) + asrt.Equal("svc/*", rl2.ShareThresholdKeyPattern[0]) + asrt.True(rl2.DetailedMetric) + asrt.Equal("test-domain.service_svc/*.endpoint_get", rl2.Stats.Key, "share_threshold takes priority over detailed_metric, should use wildcard pattern with *") + asrt.Equal(rl2.Stats.Key, rl2.FullKey, "FullKey should match Stats.Key") + }) + + // Test Case 3: Nested wildcards with share_threshold + t.Run("Nested wildcards", func(t *testing.T) { + // Nested share_threshold + rl1 := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "nested", Value: "parent"}, + {Key: "files", Value: "nested/file1.txt"}, + }, + }) + asrt.NotNil(rl1) + asrt.Equal("nested/*", rl1.ShareThresholdKeyPattern[1]) + asrt.EqualValues(200, rl1.Limit.RequestsPerUnit) + // Verify stats key includes wildcard pattern for nested share_threshold + asrt.Equal("test-domain.nested_parent.files_nested/*", rl1.Stats.Key, "Nested share_threshold should include wildcard pattern with *") + asrt.Equal(rl1.Stats.Key, rl1.FullKey, "FullKey should match Stats.Key") + + // Multiple wildcards + rl2 := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "files", Value: "top/file1.txt"}, + {Key: "files", Value: "nested/file1.txt"}, + }, + }) + asrt.NotNil(rl2) + asrt.Equal("top/*", rl2.ShareThresholdKeyPattern[0]) + asrt.Equal("nested/*", rl2.ShareThresholdKeyPattern[1]) + // Verify stats key includes wildcard patterns for multiple share_threshold entries + asrt.Equal("test-domain.files_top/*.files_nested/*", rl2.Stats.Key, "Multiple share_threshold 
entries should include all wildcard patterns") + asrt.Equal(rl2.Stats.Key, rl2.FullKey, "FullKey should match Stats.Key") + + // Parent has share_threshold, child does not + rl3 := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "path", Value: "path/foo"}, + {Key: "file", Value: "file/doc1"}, + {Key: "type", Value: "pdf"}, + }, + }) + asrt.NotNil(rl3) + asrt.Equal("path/*", rl3.ShareThresholdKeyPattern[0]) + asrt.Equal("", rl3.ShareThresholdKeyPattern[1]) + // Verify stats key: parent has share_threshold (wildcard pattern), child is wildcard without share_threshold (preserves original behavior with *) + // Note: file has wildcard pattern file/* in config but share_threshold: false, so it preserves original wildcard behavior + asrt.Equal("test-domain.path_path/*.file_file/*.type", rl3.Stats.Key, "Parent with share_threshold uses wildcard pattern, child wildcard without share_threshold preserves original behavior") + asrt.Equal(rl3.Stats.Key, rl3.FullKey, "FullKey should match Stats.Key") + + // Both parent and child have share_threshold + rl4 := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "user", Value: "user/alice"}, + {Key: "resource", Value: "res/file1"}, + {Key: "action", Value: "read"}, + }, + }) + asrt.NotNil(rl4) + asrt.Equal("user/*", rl4.ShareThresholdKeyPattern[0]) + asrt.Equal("res/*", rl4.ShareThresholdKeyPattern[1]) + // Verify stats key includes wildcard patterns for both parent and child with share_threshold + // Note: action is just a key with value, no wildcard, so it doesn't include the value in stats key (no value_to_metric or detailed_metric) + asrt.Equal("test-domain.user_user/*.resource_res/*.action", rl4.Stats.Key, "Both parent and child with share_threshold should include wildcard patterns, action key without flags uses just the key") + 
asrt.Equal(rl4.Stats.Key, rl4.FullKey, "FullKey should match Stats.Key") + }) + + // Test Case 4: Explicit value takes precedence over wildcard + // Verify: file_test1 uses isolated threshold, file_test2/test23/test123 share threshold + t.Run("Explicit value precedence over wildcard", func(t *testing.T) { + // Test explicit match: file_test1 should use isolated threshold (50/min) + rlTest1 := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "file", Value: "test1"}, + }, + }) + asrt.NotNil(rlTest1, "Should find rate limit for file=test1") + asrt.EqualValues(50, rlTest1.Limit.RequestsPerUnit, "test1 should have isolated limit of 50") + // ShareThresholdKeyPattern should be nil for explicit matches (lazy initialization) + asrt.Nil(rlTest1.ShareThresholdKeyPattern, "ShareThresholdKeyPattern should be nil for explicit matches (no share_threshold)") + // Verify Stats: test1 should have its own stats key + asrt.Equal("test-domain.file_test1", rlTest1.Stats.Key, "test1 should have its own stats key") + + // Test wildcard matches: file_test2, file_test23, file_test123 should share threshold (100/min) + testWildcardValues := []string{"test2", "test23", "test123"} + var rateLimits []*config.RateLimit + + for _, value := range testWildcardValues { + rl := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "file", Value: value}, + }, + }) + asrt.NotNil(rl, "Should find rate limit for file=%s", value) + + // Verify RateLimitConfig: Should have share_threshold pattern + asrt.NotNil(rl.ShareThresholdKeyPattern, "ShareThresholdKeyPattern should be set for %s", value) + asrt.Equal(1, len(rl.ShareThresholdKeyPattern), "Should have one pattern") + asrt.Equal("test*", rl.ShareThresholdKeyPattern[0], "Pattern should be test*") + asrt.EqualValues(100, rl.Limit.RequestsPerUnit, "Should have shared 
limit of 100") + + // Verify Stats: All wildcard matches should have the same stats key with wildcard pattern + asrt.Equal("test-domain.file_test*", rl.Stats.Key, "Stats key should include wildcard pattern with * when share_threshold is true") + + rateLimits = append(rateLimits, rl) + } + + // Verify all wildcard matches have same stats key + for i := 1; i < len(rateLimits); i++ { + asrt.Equal(rateLimits[0].Stats.Key, rateLimits[i].Stats.Key, "All wildcard values should have same stats key") + } + + // Verify stats counters are shared for wildcard matches + for _, rl := range rateLimits { + rl.Stats.TotalHits.Inc() + } + counterValue := stats.NewCounter("test-domain.file_test*.total_hits").Value() + asrt.GreaterOrEqual(counterValue, uint64(len(testWildcardValues)), "All wildcard values should increment the same counter") + + // test1 should have its own counter + rlTest1.Stats.TotalHits.Inc() + counterTest1 := stats.NewCounter("test-domain.file_test1.total_hits").Value() + asrt.GreaterOrEqual(counterTest1, uint64(1), "test1 should increment its own counter") + }) +} + +// TestWildcardStatsBehavior verifies the stats key behavior for wildcards under different flag combinations +func TestWildcardStatsBehavior(t *testing.T) { + asrt := assert.New(t) + store := stats.NewStore(stats.NewNullSink(), false) + + t.Run("No flags - preserves original behavior with wildcard pattern", func(t *testing.T) { + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "test-domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "wild", + Value: "foo*", + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 20, + Unit: "minute", + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + rl := rlConfig.GetLimit(context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "foo1"}}, + }) + 
asrt.NotNil(rl) + asrt.Equal("test-domain.wild_foo*", rl.Stats.Key) + asrt.Equal(rl.Stats.Key, rl.FullKey) + }) + + t.Run("value_to_metric - uses runtime value", func(t *testing.T) { + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "test-domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "wild", + Value: "foo*", + ValueToMetric: true, + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 20, + Unit: "minute", + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + rl := rlConfig.GetLimit(context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "foo1"}}, + }) + asrt.NotNil(rl) + asrt.Equal("test-domain.wild_foo1", rl.Stats.Key) + }) + + t.Run("share_threshold - uses wildcard pattern with *", func(t *testing.T) { + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "test-domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "wild", + Value: "foo*", + ShareThreshold: true, + RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 20, + Unit: "minute", + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + rl := rlConfig.GetLimit(context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "foo1"}}, + }) + asrt.NotNil(rl) + asrt.Equal("test-domain.wild_foo*", rl.Stats.Key) + asrt.Equal(rl.Stats.Key, rl.FullKey) + }) + + t.Run("share_threshold with value_to_metric - share_threshold takes priority", func(t *testing.T) { + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "test-domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "wild", + Value: "foo*", + ShareThreshold: true, + ValueToMetric: true, + 
RateLimit: &config.YamlRateLimit{ + RequestsPerUnit: 20, + Unit: "minute", + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + rl := rlConfig.GetLimit(context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "wild", Value: "foo1"}}, + }) + asrt.NotNil(rl) + asrt.Equal("test-domain.wild_foo*", rl.Stats.Key) + asrt.Equal(rl.Stats.Key, rl.FullKey) + }) + + t.Run("value_to_metric in other descriptor - wildcard pattern preserved", func(t *testing.T) { + cfg := []config.RateLimitConfigToLoad{ + { + Name: "inline", + ConfigYaml: &config.YamlRoot{ + Domain: "test-domain", + Descriptors: []config.YamlDescriptor{ + { + Key: "wild", + Value: "foo*", + Descriptors: []config.YamlDescriptor{ + { + Key: "other", + ValueToMetric: true, + Descriptors: []config.YamlDescriptor{ + { + Key: "limit", + RateLimit: &config.YamlRateLimit{RequestsPerUnit: 20, Unit: "minute"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + rlConfig := config.NewRateLimitConfigImpl(cfg, mockstats.NewMockStatManager(store), false) + rl := rlConfig.GetLimit( + context.TODO(), "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{ + {Key: "wild", Value: "foo1"}, + {Key: "other", Value: "bar"}, + {Key: "limit", Value: "X"}, + }, + }, + ) + asrt.NotNil(rl) + // Wildcard pattern should be preserved even though value_to_metric is enabled on "other" + asrt.Equal("test-domain.wild_foo*.other_bar.limit", rl.Stats.Key, "Should preserve wildcard pattern when value_to_metric is only on other descriptor") + asrt.Equal(rl.Stats.Key, rl.FullKey, "FullKey should match Stats.Key") + }) +} diff --git a/test/config/share_threshold.yaml b/test/config/share_threshold.yaml new file mode 100644 index 000000000..c601576aa --- /dev/null +++ b/test/config/share_threshold.yaml @@ -0,0 +1,87 @@ +# Configuration for testing share_threshold feature +domain: 
test-domain +descriptors: + - key: files + value: files/* + share_threshold: true + rate_limit: + unit: minute + requests_per_unit: 100 + - key: nested + value: parent + descriptors: + - key: files + value: nested/* + share_threshold: true + rate_limit: + unit: minute + requests_per_unit: 200 + # Test case for nested descriptors with same key name at different levels + - key: files + value: top/* + share_threshold: true + descriptors: + - key: files + value: nested/* + share_threshold: true + rate_limit: + unit: minute + requests_per_unit: 300 + # Test case for share_threshold with value_to_metric + - key: route + value: api/* + share_threshold: true + value_to_metric: true + descriptors: + - key: method + rate_limit: + unit: minute + requests_per_unit: 60 + # Test case for share_threshold with detailed_metric + - key: service + value: svc/* + share_threshold: true + descriptors: + - key: endpoint + detailed_metric: true + rate_limit: + unit: minute + requests_per_unit: 80 + # Test case for nested descriptors with multiple wildcards - mixed share_threshold + - key: path + value: path/* + share_threshold: true + descriptors: + - key: file + value: file/* + share_threshold: false + descriptors: + - key: type + rate_limit: + unit: minute + requests_per_unit: 120 + # Test case for nested descriptors with multiple wildcards - both with share_threshold + - key: user + value: user/* + share_threshold: true + descriptors: + - key: resource + value: res/* + share_threshold: true + descriptors: + - key: action + rate_limit: + unit: minute + requests_per_unit: 150 + # Test case: explicit value takes precedence over wildcard with share_threshold + - key: file + value: test1 + rate_limit: + unit: minute + requests_per_unit: 50 + - key: file + value: test* + share_threshold: true + rate_limit: + unit: minute + requests_per_unit: 100 diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 7239e7c35..df5e6fe6d 100644 --- 
a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -4,6 +4,7 @@ package integration_test import ( "crypto/tls" + "crypto/x509" "fmt" "io" "math/rand" @@ -14,12 +15,12 @@ import ( "time" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/golang/protobuf/ptypes/duration" "github.com/kelseyhightower/envconfig" "github.com/stretchr/testify/assert" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/protobuf/types/known/durationpb" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/src/service_cmd/runner" @@ -59,14 +60,14 @@ func defaultSettings() settings.Settings { return s } -func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32, durRemaining *duration.Duration) *pb.RateLimitResponse_DescriptorStatus { +func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32, durRemaining *durationpb.Duration) *pb.RateLimitResponse_DescriptorStatus { limit := &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit} return &pb.RateLimitResponse_DescriptorStatus{ Code: status, CurrentLimit: limit, LimitRemaining: limitRemaining, - DurationUntilReset: &duration.Duration{Seconds: durRemaining.GetSeconds()}, + DurationUntilReset: &durationpb.Duration{Seconds: durRemaining.GetSeconds()}, } } @@ -294,6 +295,92 @@ func Test_mTLS(t *testing.T) { defer conn.Close() } +func TestReloadGRPCServerCerts(t *testing.T) { + common.WithMultiRedis(t, []common.RedisConfig{ + {Port: 6383}, + }, func() { + s := makeSimpleRedisSettings(6383, 6380, false, 0) + assert := assert.New(t) + // TLS setup initially used to configure the server + initialServerCAFile, initialServerCertFile, initialServerCertKey, err := mTLSSetup(utils.ServerCA) + 
assert.NoError(err) + // Second TLS setup that will replace the above during test + newServerCAFile, newServerCertFile, newServerCertKey, err := mTLSSetup(utils.ServerCA) + assert.NoError(err) + // Create CertPools and tls.Configs for both CAs + initialCaCert, err := os.ReadFile(initialServerCAFile) + assert.NoError(err) + initialCertPool := x509.NewCertPool() + initialCertPool.AppendCertsFromPEM(initialCaCert) + initialTlsConfig := &tls.Config{ + RootCAs: initialCertPool, + } + newCaCert, err := os.ReadFile(newServerCAFile) + assert.NoError(err) + newCertPool := x509.NewCertPool() + newCertPool.AppendCertsFromPEM(newCaCert) + newTlsConfig := &tls.Config{ + RootCAs: newCertPool, + } + connStr := fmt.Sprintf("localhost:%v", s.GrpcPort) + + // Set up ratelimit with the initial certificate + s.GrpcServerUseTLS = true + s.GrpcServerTlsCert = initialServerCertFile + s.GrpcServerTlsKey = initialServerCertKey + settings.GrpcServerTlsConfig()(&s) + runner := startTestRunner(t, s) + defer runner.Stop() + + // Ensure TLS validation works with the initial CA in cert pool + t.Run("WithInitialCert", func(t *testing.T) { + conn, err := tls.Dial("tcp", connStr, initialTlsConfig) + assert.NoError(err) + conn.Close() + }) + + // Ensure TLS validation fails with the new CA in cert pool + t.Run("WithNewCertFail", func(t *testing.T) { + conn, err := tls.Dial("tcp", connStr, newTlsConfig) + assert.Error(err) + if err == nil { + conn.Close() + } + }) + + // Replace the initial certificate with the new one + err = os.Rename(newServerCertFile, initialServerCertFile) + assert.NoError(err) + err = os.Rename(newServerCertKey, initialServerCertKey) + assert.NoError(err) + + // Ensure TLS validation works with the new CA in cert pool + t.Run("WithNewCertOK", func(t *testing.T) { + // If this takes longer than 10s, something is probably wrong + wait := 10 + for i := 0; i < wait; i++ { + // Ensure the new certificate is being used + conn, err := tls.Dial("tcp", connStr, newTlsConfig) + if err == 
nil { + conn.Close() + break + } + time.Sleep(1 * time.Second) + } + assert.NoError(err) + }) + + // Ensure TLS validation fails with the initial CA in cert pool + t.Run("WithInitialCertFail", func(t *testing.T) { + conn, err := tls.Dial("tcp", connStr, initialTlsConfig) + assert.Error(err) + if err == nil { + conn.Close() + } + }) + }) +} + func testBasicConfigAuthTLS(perSecond bool, local_cache_size int) func(*testing.T) { s := makeSimpleRedisSettings(16381, 16382, perSecond, local_cache_size) s.RedisTlsConfig = &tls.Config{} @@ -384,8 +471,9 @@ func configRedisCluster(s *settings.Settings) { s.RedisAuth = "password123" s.RedisPerSecondAuth = "password123" - s.RedisPerSecondPipelineLimit = 8 - s.RedisPipelineLimit = 8 + // RedisPipelineLimit is deprecated in radix v4, use RedisPipelineWindow instead + s.RedisPerSecondPipelineWindow = 150 * time.Microsecond + s.RedisPipelineWindow = 150 * time.Microsecond } func testBasicConfigWithoutWatchRootWithRedisCluster(perSecond bool, local_cache_size int) func(*testing.T) { @@ -709,7 +797,8 @@ func startTestRunner(t *testing.T, s settings.Settings) *runner.Runner { }() // HACK: Wait for the server to come up. Make a hook that we can wait on. 
- common.WaitForTcpPort(context.Background(), s.GrpcPort, 1*time.Second) + // Increased timeout from 1s to 10s to allow for Redis cluster connection initialization + common.WaitForTcpPort(context.Background(), s.GrpcPort, 10*time.Second) return &runner } @@ -821,3 +910,82 @@ func waitForConfigReload(runner *runner.Runner, loadCountBefore uint64) (uint64, } return loadCountAfter, reloaded } + +func TestShareThreshold(t *testing.T) { + common.WithMultiRedis(t, []common.RedisConfig{ + {Port: 6383}, + {Port: 6380}, + }, func() { + t.Run("WithoutPerSecondRedis", testShareThreshold(makeSimpleRedisSettings(6383, 6380, false, 0))) + }) +} + +func testShareThreshold(s settings.Settings) func(*testing.T) { + return func(t *testing.T) { + runner := startTestRunner(t, s) + defer runner.Stop() + + assert := assert.New(t) + conn, err := grpc.Dial(fmt.Sprintf("localhost:%v", s.GrpcPort), grpc.WithInsecure()) + assert.NoError(err) + defer conn.Close() + c := pb.NewRateLimitServiceClient(conn) + + // Use the domain from the config file + domain := "share-threshold-test" + + // Test Case 1: share_threshold: true - different values matching files/* should share the same threshold + // Make 10 requests with files/a.pdf - can be OK or OVER_LIMIT + for i := 0; i < 10; i++ { + response, err := c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequest(domain, [][][2]string{{{"files", "files/a.pdf"}}}, 1)) + assert.NoError(err) + // Each request can be OK or OVER_LIMIT (depending on when limit is reached) + assert.True(response.OverallCode == pb.RateLimitResponse_OK || response.OverallCode == pb.RateLimitResponse_OVER_LIMIT, + "Request %d should be OK or OVER_LIMIT, got: %v", i+1, response.OverallCode) + } + + // Now make a request with files/b.csv - must be OVER_LIMIT because it shares the threshold + response, err := c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequest(domain, [][][2]string{{{"files", "files/b.csv"}}}, 1)) + assert.NoError(err) + 
durRemaining := response.GetStatuses()[0].DurationUntilReset + common.AssertProtoEqual( + assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OVER_LIMIT, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + newDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, 10, pb.RateLimitResponse_RateLimit_HOUR, 0, durRemaining), + }, + }, + response) + + // Test Case 2: share_threshold: false - different values should have isolated thresholds + // Use random values with prefix files_no_share to ensure uniqueness (based on timestamp) + // Each value should have its own isolated threshold, so all 10 requests should be OK + baseTimestamp := time.Now().UnixNano() + r := rand.New(rand.NewSource(baseTimestamp)) + for i := 0; i < 10; i++ { + // Generate unique value using timestamp and random number to avoid collisions + uniqueValue := fmt.Sprintf("files_no_share/%d-%d", baseTimestamp, r.Int63()) + response, err := c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequest(domain, [][][2]string{{{"files_no_share", uniqueValue}}}, 1)) + assert.NoError(err) + // Each value has its own isolated threshold, so each request should have remaining = 9 (10 - 1) + expectedRemaining := uint32(9) + durRemaining := response.GetStatuses()[0].DurationUntilReset + common.AssertProtoEqual( + assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + newDescriptorStatus(pb.RateLimitResponse_OK, 10, pb.RateLimitResponse_RateLimit_HOUR, expectedRemaining, durRemaining), + }, + }, + response) + } + } +} diff --git a/test/integration/runtime/current/ratelimit/config/share_threshold.yaml b/test/integration/runtime/current/ratelimit/config/share_threshold.yaml new file mode 100644 index 000000000..33b55dc4f --- /dev/null +++ b/test/integration/runtime/current/ratelimit/config/share_threshold.yaml @@ -0,0 +1,15 @@ +domain: share-threshold-test +descriptors: + # Test case 1: share_threshold: true - files/* 
should share threshold + - key: files + value: files/* + share_threshold: true + rate_limit: + unit: hour + requests_per_unit: 10 + + - key: files_no_share + value: files_no_share/* + rate_limit: + unit: hour + requests_per_unit: 10 diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go index b3babcee9..7bc404079 100644 --- a/test/limiter/base_limiter_test.go +++ b/test/limiter/base_limiter_test.go @@ -29,9 +29,9 @@ func TestGenerateCacheKeys(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) - cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) + cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, []uint64{1}) assert.Equal(1, len(cacheKeys)) assert.Equal("domain_key_value_1234", cacheKeys[0].Key) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) @@ -48,14 +48,95 @@ func TestGenerateCacheKeysPrefix(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "prefix:", sm) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, 
"", nil, false)} assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) - cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) + cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, []uint64{1}) assert.Equal(1, len(cacheKeys)) assert.Equal("prefix:domain_key_value_1234", cacheKeys[0].Key) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) } +func TestGenerateCacheKeysWithShareThreshold(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + timeSource := mock_utils.NewMockTimeSource(controller) + jitterSource := mock_utils.NewMockJitterRandSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + sm := mockstats.NewMockStatManager(statsStore) + timeSource.EXPECT().UnixNow().Return(int64(1234)).AnyTimes() + baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8, "", sm) + + // Test 1: Simple case - different values with same wildcard prefix generate same cache key + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("files_files/*"), false, false, false, "", nil, false) + limit.ShareThresholdKeyPattern = []string{"files/*"} // Entry at index 0 + + request1 := common.NewRateLimitRequest("domain", [][][2]string{{{"files", "files/a.pdf"}}}, 1) + limits1 := []*config.RateLimit{limit} + cacheKeys1 := baseRateLimit.GenerateCacheKeys(request1, limits1, []uint64{1}) + assert.Equal(1, len(cacheKeys1)) + assert.Equal("domain_files_files/*_1234", cacheKeys1[0].Key) + + request2 := common.NewRateLimitRequest("domain", [][][2]string{{{"files", "files/b.csv"}}}, 1) + limits2 := []*config.RateLimit{limit} + cacheKeys2 := baseRateLimit.GenerateCacheKeys(request2, limits2, []uint64{1}) + assert.Equal(1, len(cacheKeys2)) + // Should generate the same cache key as the first request + assert.Equal("domain_files_files/*_1234", cacheKeys2[0].Key) + assert.Equal(cacheKeys1[0].Key, cacheKeys2[0].Key) + + // 
Test 2: Multiple different values all generate the same cache key + testValues := []string{"files/c.txt", "files/d.json", "files/e.xml", "files/subdir/f.txt"} + for _, value := range testValues { + request := common.NewRateLimitRequest("domain", [][][2]string{{{"files", value}}}, 1) + cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits1, []uint64{1}) + assert.Equal(1, len(cacheKeys)) + assert.Equal("domain_files_files/*_1234", cacheKeys[0].Key, "Value %s should generate same cache key", value) + } + + // Test 3: Nested descriptors with share_threshold at second level + limitNested := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("parent_files_nested/*"), false, false, false, "", nil, false) + limitNested.ShareThresholdKeyPattern = []string{"", "nested/*"} // First entry no share_threshold, second entry has it + + request3a := common.NewRateLimitRequest("domain", [][][2]string{ + {{"parent", "value1"}, {"files", "nested/file1.txt"}}, + }, 1) + limits3a := []*config.RateLimit{limitNested} + cacheKeys3a := baseRateLimit.GenerateCacheKeys(request3a, limits3a, []uint64{1}) + assert.Equal(1, len(cacheKeys3a)) + assert.Equal("domain_parent_value1_files_nested/*_1234", cacheKeys3a[0].Key) + + request3b := common.NewRateLimitRequest("domain", [][][2]string{ + {{"parent", "value1"}, {"files", "nested/file2.csv"}}, + }, 1) + cacheKeys3b := baseRateLimit.GenerateCacheKeys(request3b, limits3a, []uint64{1}) + assert.Equal(1, len(cacheKeys3b)) + // Should generate the same cache key despite different file values + assert.Equal("domain_parent_value1_files_nested/*_1234", cacheKeys3b[0].Key) + assert.Equal(cacheKeys3a[0].Key, cacheKeys3b[0].Key) + + // Test 4: Multiple entries with share_threshold at different positions + limitMulti := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("files_top/*_files_nested/*"), false, false, false, "", nil, false) + limitMulti.ShareThresholdKeyPattern = []string{"top/*", "nested/*"} 
// Both entries have share_threshold + + request4a := common.NewRateLimitRequest("domain", [][][2]string{ + {{"files", "top/file1.txt"}, {"files", "nested/sub1.txt"}}, + }, 1) + limits4a := []*config.RateLimit{limitMulti} + cacheKeys4a := baseRateLimit.GenerateCacheKeys(request4a, limits4a, []uint64{1}) + assert.Equal(1, len(cacheKeys4a)) + assert.Equal("domain_files_top/*_files_nested/*_1234", cacheKeys4a[0].Key) + + request4b := common.NewRateLimitRequest("domain", [][][2]string{ + {{"files", "top/file2.pdf"}, {"files", "nested/sub2.csv"}}, + }, 1) + cacheKeys4b := baseRateLimit.GenerateCacheKeys(request4b, limits4a, []uint64{1}) + assert.Equal(1, len(cacheKeys4b)) + // Should generate the same cache key despite different values + assert.Equal("domain_files_top/*_files_nested/*_1234", cacheKeys4b[0].Key) + assert.Equal(cacheKeys4a[0].Key, cacheKeys4b[0].Key) +} + func TestOverLimitWithLocalCache(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) @@ -102,7 +183,7 @@ func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. 
responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) @@ -125,7 +206,7 @@ func TestGetResponseStatusOverLimitWithLocalCacheShadowMode(t *testing.T) { sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) // This limit is in ShadowMode - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, false, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) @@ -149,7 +230,7 @@ func TestGetResponseStatusOverLimit(t *testing.T) { localCache := freecache.NewCache(100) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) @@ -175,7 +256,7 @@ func TestGetResponseStatusOverLimitShadowMode(t *testing.T) { sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8, "", sm) // Key is in shadow_mode: true - limits := []*config.RateLimit{config.NewRateLimit(5, 
pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, false, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) @@ -197,7 +278,7 @@ func TestGetResponseStatusBelowLimit(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) @@ -218,7 +299,7 @@ func TestGetResponseStatusBelowLimitShadowMode(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) sm := mockstats.NewMockStatManager(statsStore) baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8, "", sm) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, true, false, "", nil, false)} limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) 
assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index 14022ecbd..606a9d844 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -53,7 +53,7 @@ func TestMemcached(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -77,7 +77,7 @@ func TestMemcached(t *testing.T) { }, 1) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, false, "", nil, false), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -102,17 +102,17 @@ func TestMemcached(t *testing.T) { nil, ) client.EXPECT().Increment("domain_key3_value3_997200", uint64(1)).Return(uint64(11), nil) - client.EXPECT().Increment("domain_key3_value3_subkey3_subvalue3_950400", uint64(1)).Return(uint64(13), nil) + client.EXPECT().Increment("domain_key3_value3_subkey3_subvalue3_950400", uint64(2)).Return(uint64(13), nil) - request = common.NewRateLimitRequest( + request = common.NewRateLimitRequestWithPerDescriptorHitsAddend( "domain", [][][2]string{ {{"key3", "value3"}}, {{"key3", 
"value3"}, {"subkey3", "subvalue3"}}, - }, 1) + }, []uint64{1, 2}) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil, false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, false, "", nil, false), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -124,10 +124,10 @@ func TestMemcached(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.WithinLimit.Value()) - assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) - assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(0), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(2), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(2), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.WithinLimit.Value()) cache.Flush() } @@ -150,7 +150,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} assert.Equal( 
[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -168,7 +168,7 @@ func TestMemcachedGetError(t *testing.T) { client.EXPECT().Increment("domain_key_value1_1234", uint64(1)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value1"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value1"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -183,7 +183,8 @@ func TestMemcachedGetError(t *testing.T) { func testLocalCacheStats(localCacheStats stats.StatGenerator, statsStore stats.Store, sink *common.TestStatSink, expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int, - expectedEntryCount int) func(*testing.T) { + expectedEntryCount int, +) func(*testing.T) { return func(t *testing.T) { localCacheStats.GenerateStats() statsStore.Flush() @@ -241,7 +242,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, false, "", nil, false), } assert.Equal( @@ -342,7 +343,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", 
"value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, false, "", nil, false), } assert.Equal( @@ -399,7 +400,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key5_value5_1234", uint64(3)).Return(uint64(5), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -417,7 +418,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key6_value6_1234", uint64(2)).Return(uint64(7), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -435,7 +436,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key7_value7_1234", uint64(3)).Return(uint64(19), nil) request = 
common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -453,7 +454,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key8_value8_1234", uint64(3)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -471,7 +472,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key9_value9_1234", uint64(7)).Return(uint64(22), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: 
pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -489,7 +490,7 @@ func TestNearLimit(t *testing.T) { client.EXPECT().Increment("domain_key10_value10_1234", uint64(3)).Return(uint64(30), nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -533,7 +534,7 @@ func TestMemcacheWithJitter(t *testing.T) { ).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -576,7 +577,7 @@ func TestMemcacheAdd(t *testing.T) { uint64(2), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, 
pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -600,7 +601,7 @@ func TestMemcacheAdd(t *testing.T) { ).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -673,7 +674,7 @@ func TestMemcachedTracer(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} cache.DoLimit(context.Background(), request, limits) diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/redis.go index 2d6d059f8..192b41c26 100644 --- a/test/mocks/redis/redis.go +++ b/test/mocks/redis/redis.go @@ -68,20 +68,6 @@ func (mr *MockClientMockRecorder) DoCmd(arg0, arg1, arg2 interface{}, arg3 ...in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoCmd", reflect.TypeOf((*MockClient)(nil).DoCmd), varargs...) 
} -// ImplicitPipeliningEnabled mocks base method -func (m *MockClient) ImplicitPipeliningEnabled() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImplicitPipeliningEnabled") - ret0, _ := ret[0].(bool) - return ret0 -} - -// ImplicitPipeliningEnabled indicates an expected call of ImplicitPipeliningEnabled -func (mr *MockClientMockRecorder) ImplicitPipeliningEnabled() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImplicitPipeliningEnabled", reflect.TypeOf((*MockClient)(nil).ImplicitPipeliningEnabled)) -} - // NumActiveConns mocks base method func (m *MockClient) NumActiveConns() int { m.ctrl.T.Helper() diff --git a/test/mocks/stats/manager.go b/test/mocks/stats/manager.go index dd9db2464..522f84868 100644 --- a/test/mocks/stats/manager.go +++ b/test/mocks/stats/manager.go @@ -48,6 +48,15 @@ func (m *MockStatManager) NewStats(key string) stats.RateLimitStats { return ret } +func (m *MockStatManager) NewDomainStats(key string) stats.DomainStats { + ret := stats.DomainStats{} + logger.Debugf("outputing test domain stats %s", key) + ret.Key = key + ret.NotFound = m.store.NewCounter(key + ".domain_not_found") + + return ret +} + func NewMockStatManager(store gostats.Store) stats.Manager { return &MockStatManager{store: store} } diff --git a/test/provider/xds_grpc_sotw_provider_test.go b/test/provider/xds_grpc_sotw_provider_test.go index 49a124bdf..70fdbeddc 100644 --- a/test/provider/xds_grpc_sotw_provider_test.go +++ b/test/provider/xds_grpc_sotw_provider_test.go @@ -86,7 +86,7 @@ func testInitialXdsConfig(snapVersion *int, setSnapshotFunc common.SetSnapshotFu config, err := configEvent.GetConfig() assert.Nil(err) - assert.Equal("foo.k1_v1: unit=MINUTE requests_per_unit=3, shadow_mode: false\n", config.Dump()) + assert.Equal("foo.k1_v1: unit=MINUTE requests_per_unit=3, shadow_mode: false, quota_mode: false\n", config.Dump()) } } @@ -122,7 +122,7 @@ func testNewXdsConfigUpdate(snapVersion *int, setSnapshotFunc 
common.SetSnapshot config, err := configEvent.GetConfig() assert.Nil(err) - assert.Equal("foo.k2_v2: unit=MINUTE requests_per_unit=5, shadow_mode: false\n", config.Dump()) + assert.Equal("foo.k2_v2: unit=MINUTE requests_per_unit=5, shadow_mode: false, quota_mode: false\n", config.Dump()) } } @@ -173,8 +173,8 @@ func testMultiDomainXdsConfigUpdate(snapVersion *int, setSnapshotFunc common.Set config, err := configEvent.GetConfig() assert.Nil(err) assert.ElementsMatch([]string{ - "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false", - "bar.k1_v1: unit=MINUTE requests_per_unit=100, shadow_mode: false", + "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false, quota_mode: false", + "bar.k1_v1: unit=MINUTE requests_per_unit=100, shadow_mode: false, quota_mode: false", }, strings.Split(strings.TrimSuffix(config.Dump(), "\n"), "\n")) } } @@ -266,13 +266,13 @@ func testDeeperLimitsXdsConfigUpdate(snapVersion *int, setSnapshotFunc common.Se config, err := configEvent.GetConfig() assert.Nil(err) assert.ElementsMatch([]string{ - "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false", - "foo.k1_v1.k2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false", - "foo.k1_v1.k2_v2: unit=HOUR requests_per_unit=15, shadow_mode: false", - "foo.j1_v2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false", - "foo.j1_v2.j2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false", - "foo.j1_v2.j2_v2: unit=DAY requests_per_unit=15, shadow_mode: true", - "bar.k1_v1: unit=MINUTE requests_per_unit=100, shadow_mode: false", + "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false, quota_mode: false", + "foo.k1_v1.k2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false, quota_mode: false", + "foo.k1_v1.k2_v2: unit=HOUR requests_per_unit=15, shadow_mode: false, quota_mode: false", + "foo.j1_v2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false, quota_mode: false", + "foo.j1_v2.j2: unit=UNKNOWN requests_per_unit=0, shadow_mode: false, quota_mode: false", 
+ "foo.j1_v2.j2_v2: unit=DAY requests_per_unit=15, shadow_mode: true, quota_mode: false", + "bar.k1_v1: unit=MINUTE requests_per_unit=100, shadow_mode: false, quota_mode: false", }, strings.Split(strings.TrimSuffix(config.Dump(), "\n"), "\n")) } } @@ -323,8 +323,8 @@ func testSameDomainMultipleXdsConfigUpdate(setSnapshotFunc common.SetSnapshotFun config, err := configEvent.GetConfig() assert.Nil(err) assert.ElementsMatch([]string{ - "foo.k1_v2: unit=MINUTE requests_per_unit=100, shadow_mode: false", - "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false", + "foo.k1_v2: unit=MINUTE requests_per_unit=100, shadow_mode: false, quota_mode: false", + "foo.k1_v1: unit=MINUTE requests_per_unit=10, shadow_mode: false, quota_mode: false", }, strings.Split(strings.TrimSuffix(config.Dump(), "\n"), "\n")) } } diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index b0b2a2153..2c153ba97 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -44,12 +44,12 @@ func BenchmarkParallelDoLimit(b *testing.B) { return func(b *testing.B) { statsStore := gostats.NewStore(gostats.NewNullSink(), false) sm := stats.NewMockStatManager(statsStore) - client := redis.NewClientImpl(statsStore, false, "", "tcp", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit, nil, false, nil) + client := redis.NewClientImpl(statsStore, false, "", "tcp", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit, nil, false, nil, 10*time.Second, "", "") defer client.Close() cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, "", sm, true) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(1000000000, 
pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} // wait for the pool to fill up for { diff --git a/test/redis/driver_impl_test.go b/test/redis/driver_impl_test.go index cbb5a9560..8d1b30132 100644 --- a/test/redis/driver_impl_test.go +++ b/test/redis/driver_impl_test.go @@ -2,6 +2,7 @@ package redis_test import ( "fmt" + "strings" "testing" "time" @@ -38,7 +39,7 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(auth, addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, auth, "tcp", "single", addr, 1, pipelineWindow, pipelineLimit, nil, false, nil) + return redis.NewClientImpl(statsStore, false, auth, "tcp", "single", addr, 1, pipelineWindow, pipelineLimit, nil, false, nil, 10*time.Second, "", "") } t.Run("connection refused", func(t *testing.T) { @@ -65,7 +66,7 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit redisSrv.RequireAuth(redisAuth) - assert.PanicsWithError(t, "NOAUTH Authentication required.", func() { + assert.PanicsWithError(t, "response returned from Conn: NOAUTH Authentication required.", func() { mkRedisClient("", redisSrv.Addr()) }) }) @@ -102,36 +103,23 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit redisSrv.RequireUserAuth(user, pass) redisAuth := fmt.Sprintf("%s:invalid-password", user) - assert.PanicsWithError(t, "WRONGPASS invalid username-password pair", func() { + assert.PanicsWithError(t, "response returned from Conn: WRONGPASS invalid username-password pair", func() { mkRedisClient(redisAuth, redisSrv.Addr()) }) }) - - t.Run("ImplicitPipeliningEnabled() return expected value", func(t *testing.T) { - redisSrv := mustNewRedisServer() - defer redisSrv.Close() - - client := mkRedisClient("", redisSrv.Addr()) - - if pipelineWindow == 0 && pipelineLimit == 0 { - assert.False(t, 
client.ImplicitPipeliningEnabled()) - } else { - assert.True(t, client.ImplicitPipeliningEnabled()) - } - }) } } func TestNewClientImpl(t *testing.T) { - t.Run("ImplicitPipeliningEnabled", testNewClientImpl(t, 2*time.Millisecond, 2)) - t.Run("ImplicitPipeliningDisabled", testNewClientImpl(t, 0, 0)) + t.Run("WithPipelineWindow", testNewClientImpl(t, 2*time.Millisecond, 2)) + t.Run("WithoutPipelineWindow", testNewClientImpl(t, 0, 0)) } func TestDoCmd(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, 0, 0, nil, false, nil) + return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, 0, 0, nil, false, nil, 10*time.Second, "", "") } t.Run("SETGET ok", func(t *testing.T) { @@ -167,7 +155,7 @@ func TestDoCmd(t *testing.T) { assert.Nil(t, client.DoCmd(nil, "SET", "foo", "bar")) redisSrv.Close() - assert.EqualError(t, client.DoCmd(nil, "GET", "foo"), "EOF") + assert.EqualError(t, client.DoCmd(nil, "GET", "foo"), "response returned from Conn: EOF") }) } @@ -176,7 +164,7 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, pipelineWindow, pipelineLimit, nil, false, nil) + return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, pipelineWindow, pipelineLimit, nil, false, nil, 10*time.Second, "", "") } t.Run("SETGET ok", func(t *testing.T) { @@ -219,7 +207,13 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f expectErrContainEOF := func(t *testing.T, err error) { assert.NotNil(t, err) - assert.Contains(t, err.Error(), "EOF") + // radix v4 wraps errors with "response returned from Conn:" + // and may return different connection errors (EOF, connection 
reset, etc) + errMsg := err.Error() + hasConnectionError := strings.Contains(errMsg, "EOF") || + strings.Contains(errMsg, "connection reset") || + strings.Contains(errMsg, "broken pipe") + assert.True(t, hasConnectionError, "expected connection error, got: %s", errMsg) } expectErrContainEOF(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, nil, "GET", "foo"))) @@ -228,6 +222,278 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f } func TestPipeDo(t *testing.T) { - t.Run("ImplicitPipeliningEnabled", testPipeDo(t, 10*time.Millisecond, 2)) - t.Run("ImplicitPipeliningDisabled", testPipeDo(t, 0, 0)) + t.Run("WithPipelineWindow", testPipeDo(t, 10*time.Millisecond, 2)) + t.Run("WithoutPipelineWindow", testPipeDo(t, 0, 0)) +} + +// Tests for pool on-empty behavior +func TestPoolOnEmptyBehavior(t *testing.T) { + statsStore := stats.NewStore(stats.NewNullSink(), false) + + // Helper to create client with specific on-empty behavior + mkRedisClientWithBehavior := func(addr, behavior string) redis.Client { + return redis.NewClientImpl(statsStore, false, "", "tcp", "single", addr, 1, 0, 0, nil, false, nil, 10*time.Second, behavior, "") + } + + t.Run("default behavior (empty string)", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + var client redis.Client + assert.NotPanics(t, func() { + client = mkRedisClientWithBehavior(redisSrv.Addr(), "") + }) + assert.NotNil(t, client) + + // Verify client works + var res string + assert.Nil(t, client.DoCmd(nil, "SET", "foo", "bar")) + assert.Nil(t, client.DoCmd(&res, "GET", "foo")) + assert.Equal(t, "bar", res) + }) + + t.Run("ERROR behavior should panic", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + // radix v4 does not support ERROR behavior - should panic at startup + panicErr := expectPanicError(t, func() { + mkRedisClientWithBehavior(redisSrv.Addr(), "ERROR") + }) + assert.Contains(t, panicErr.Error(), 
"REDIS_POOL_ON_EMPTY_BEHAVIOR=ERROR is not supported in radix v4") + }) + + t.Run("CREATE behavior should panic", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + // radix v4 does not support CREATE behavior - should panic at startup + panicErr := expectPanicError(t, func() { + mkRedisClientWithBehavior(redisSrv.Addr(), "CREATE") + }) + assert.Contains(t, panicErr.Error(), "REDIS_POOL_ON_EMPTY_BEHAVIOR=CREATE is not supported in radix v4") + }) + + t.Run("WAIT behavior", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + var client redis.Client + assert.NotPanics(t, func() { + client = mkRedisClientWithBehavior(redisSrv.Addr(), "WAIT") + }) + assert.NotNil(t, client) + + // Verify client works + var res string + assert.Nil(t, client.DoCmd(nil, "SET", "test5", "value5")) + assert.Nil(t, client.DoCmd(&res, "GET", "test5")) + assert.Equal(t, "value5", res) + }) + + t.Run("case insensitive behavior - lowercase 'error' panics", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + // Test that lowercase 'error' is treated same as 'ERROR' (case insensitive) + panicErr := expectPanicError(t, func() { + mkRedisClientWithBehavior(redisSrv.Addr(), "error") + }) + assert.Contains(t, panicErr.Error(), "REDIS_POOL_ON_EMPTY_BEHAVIOR=ERROR is not supported in radix v4") + }) + + t.Run("case insensitive behavior - lowercase 'create' panics", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + // Test that lowercase 'create' is treated same as 'CREATE' (case insensitive) + panicErr := expectPanicError(t, func() { + mkRedisClientWithBehavior(redisSrv.Addr(), "create") + }) + assert.Contains(t, panicErr.Error(), "REDIS_POOL_ON_EMPTY_BEHAVIOR=CREATE is not supported in radix v4") + }) + + t.Run("case insensitive behavior - lowercase 'wait' works", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + // Test that lowercase 
'wait' is treated same as 'WAIT' (case insensitive) + var client redis.Client + assert.NotPanics(t, func() { + client = mkRedisClientWithBehavior(redisSrv.Addr(), "wait") + }) + assert.NotNil(t, client) + + // Verify client works + var res string + assert.Nil(t, client.DoCmd(nil, "SET", "test6", "value6")) + assert.Nil(t, client.DoCmd(&res, "GET", "test6")) + assert.Equal(t, "value6", res) + }) + + t.Run("unknown behavior falls back to default", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + // Unknown behavior should not panic, just log warning and use default + var client redis.Client + assert.NotPanics(t, func() { + client = mkRedisClientWithBehavior(redisSrv.Addr(), "UNKNOWN_BEHAVIOR") + }) + assert.NotNil(t, client) + + // Verify client works + var res string + assert.Nil(t, client.DoCmd(nil, "SET", "test7", "value7")) + assert.Nil(t, client.DoCmd(&res, "GET", "test7")) + assert.Equal(t, "value7", res) + }) +} + +func TestNewClientImplSentinel(t *testing.T) { + statsStore := stats.NewStore(stats.NewNullSink(), false) + + mkSentinelClient := func(auth, sentinelAuth, url string, useTls bool, timeout time.Duration) redis.Client { + // Pass nil for tlsConfig - we can't test TLS without a real TLS server, + // but we can verify the code path is executed (logs will show TLS is enabled) + return redis.NewClientImpl(statsStore, useTls, auth, "tcp", "sentinel", url, 1, 0, 0, nil, false, nil, timeout, "", sentinelAuth) + } + + t.Run("invalid url format - missing sentinel addresses", func(t *testing.T) { + panicErr := expectPanicError(t, func() { + mkSentinelClient("", "", "mymaster", false, 10*time.Second) + }) + assert.Contains(t, panicErr.Error(), "Expected master name and a list of urls for the sentinels") + }) + + t.Run("invalid url format - only master name", func(t *testing.T) { + panicErr := expectPanicError(t, func() { + mkSentinelClient("", "", "mymaster,", false, 10*time.Second) + }) + // Empty sentinel address causes 
"missing address" error from radix + assert.True(t, + containsAny(panicErr.Error(), []string{"Expected master name", "missing address"}), + "Expected format validation error, got: %s", panicErr.Error()) + }) + + t.Run("connection refused - sentinel not available", func(t *testing.T) { + // Use a port that's unlikely to have a sentinel running + url := "mymaster,localhost:12345" + panicErr := expectPanicError(t, func() { + mkSentinelClient("", "", url, false, 1*time.Second) + }) + // Should fail with connection error or timeout + assert.NotNil(t, panicErr) + assert.True(t, + containsAny(panicErr.Error(), []string{"connection refused", "timeout", "no such host", "connect"}), + "Expected connection error, got: %s", panicErr.Error()) + }) + + t.Run("sentinel auth password only", func(t *testing.T) { + // This will fail to connect, but we're testing that sentinelAuth parameter is accepted + // The log output will show "enabling authentication to redis sentinel" which confirms the code path + url := "mymaster,localhost:12345" + panicErr := expectPanicError(t, func() { + mkSentinelClient("", "sentinel-password", url, false, 1*time.Second) + }) + // Should fail with connection error, not auth error (since we can't connect) + assert.NotNil(t, panicErr) + assert.True(t, + containsAny(panicErr.Error(), []string{"connection refused", "timeout", "no such host", "connect"}), + "Expected connection error, got: %s", panicErr.Error()) + }) + + t.Run("sentinel auth user:password", func(t *testing.T) { + // This will fail to connect, but we're testing that sentinelAuth parameter with user:password format is accepted + // The log output will show "enabling authentication to redis sentinel on ... 
with user sentinel-user" + url := "mymaster,localhost:12345" + panicErr := expectPanicError(t, func() { + mkSentinelClient("", "sentinel-user:sentinel-pass", url, false, 1*time.Second) + }) + // Should fail with connection error, not auth error (since we can't connect) + assert.NotNil(t, panicErr) + assert.True(t, + containsAny(panicErr.Error(), []string{"connection refused", "timeout", "no such host", "connect"}), + "Expected connection error, got: %s", panicErr.Error()) + }) + + t.Run("sentinel with timeout", func(t *testing.T) { + // Test that timeout parameter is used + url := "mymaster,localhost:12345" + start := time.Now() + panicErr := expectPanicError(t, func() { + mkSentinelClient("", "", url, false, 500*time.Millisecond) + }) + duration := time.Since(start) + assert.NotNil(t, panicErr) + // Timeout should be respected (with some tolerance) + assert.True(t, duration < 2*time.Second, "Timeout should be respected, took %v", duration) + }) + + t.Run("sentinel with multiple addresses", func(t *testing.T) { + // Test that multiple sentinel addresses are accepted in URL format + url := "mymaster,localhost:12345,localhost:12346,localhost:12347" + panicErr := expectPanicError(t, func() { + mkSentinelClient("", "", url, false, 1*time.Second) + }) + // Should fail with connection error, not format error + assert.NotNil(t, panicErr) + assert.NotContains(t, panicErr.Error(), "Expected master name") + assert.True(t, + containsAny(panicErr.Error(), []string{"connection refused", "timeout", "no such host", "connect"}), + "Expected connection error, got: %s", panicErr.Error()) + }) + + t.Run("sentinel with redis auth but no sentinel auth", func(t *testing.T) { + // Test that redis auth and sentinel auth are separate + // redisAuth is for master/replica, sentinelAuth is for sentinel nodes + url := "mymaster,localhost:12345" + panicErr := expectPanicError(t, func() { + mkSentinelClient("redis-password", "", url, false, 1*time.Second) + }) + // Should fail with connection 
error (can't test auth without real sentinel) + assert.NotNil(t, panicErr) + assert.True(t, + containsAny(panicErr.Error(), []string{"connection refused", "timeout", "no such host", "connect"}), + "Expected connection error, got: %s", panicErr.Error()) + }) + + t.Run("sentinel with TLS enabled", func(t *testing.T) { + // Test that TLS configuration is accepted (will fail to connect without real TLS server) + // The log output will show "enabling TLS to redis sentinel" which confirms the code path + url := "mymaster,localhost:12345" + panicErr := expectPanicError(t, func() { + mkSentinelClient("", "", url, true, 1*time.Second) + }) + // Should fail with connection/TLS error (can't test TLS without real TLS server) + assert.NotNil(t, panicErr) + // Error could be connection refused, TLS handshake failure, or timeout + assert.True(t, + containsAny(panicErr.Error(), []string{"connection refused", "timeout", "no such host", "connect", "tls", "handshake"}), + "Expected connection/TLS error, got: %s", panicErr.Error()) + }) + + t.Run("sentinel with TLS and sentinel auth", func(t *testing.T) { + // Test that both TLS and sentinel auth can be configured together + // The log output will show both TLS and auth messages + url := "mymaster,localhost:12345" + panicErr := expectPanicError(t, func() { + mkSentinelClient("redis-password", "sentinel-password", url, true, 1*time.Second) + }) + // Should fail with connection/TLS error (can't test without real servers) + assert.NotNil(t, panicErr) + assert.True(t, + containsAny(panicErr.Error(), []string{"connection refused", "timeout", "no such host", "connect", "tls", "handshake"}), + "Expected connection/TLS error, got: %s", panicErr.Error()) + }) +} + +// Helper function to check if error message contains any of the given strings +func containsAny(s string, substrs []string) bool { + for _, substr := range substrs { + if strings.Contains(strings.ToLower(s), strings.ToLower(substr)) { + return true + } + } + return false } diff 
--git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 8c7f3c474..e52abb68d 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -8,7 +8,7 @@ import ( "github.com/envoyproxy/ratelimit/test/mocks/stats" "github.com/coocood/freecache" - "github.com/mediocregopher/radix/v3" + "github.com/mediocregopher/radix/v4" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" gostats "github.com/lyft/gostats" @@ -35,7 +35,10 @@ func TestRedis(t *testing.T) { } func pipeAppend(pipeline redis.Pipeline, rcv interface{}, cmd, key string, args ...interface{}) redis.Pipeline { - return append(pipeline, radix.FlatCmd(rcv, cmd, key, args...)) + return append(pipeline, redis.PipelineAction{ + Action: radix.FlatCmd(rcv, cmd, append([]interface{}{key}, args...)...), + Key: key, + }) } func testRedis(usePerSecondRedis bool) func(*testing.T) { @@ -64,12 +67,12 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client } - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint64(1)).SetArg(1, uint64(5)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, 
CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -81,20 +84,20 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key2_value2_subkey2_subvalue2_1200", int64(60)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest( + request = common.NewRateLimitRequestWithPerDescriptorHitsAddend( "domain", [][][2]string{ {{"key2", "value2"}}, {{"key2", "value2"}, {"subkey2", "subvalue2"}}, - }, 1) + }, []uint64{0, 1}) limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, sm.NewStats("key2_value2_subkey2_subvalue2"), false, false, false, "", nil, false), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -109,23 +112,23 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_997200", uint64(0)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", 
"domain_key3_value3_997200", int64(3600)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint64(1)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_subkey3_subvalue3_950400", int64(86400)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest( + request = common.NewRateLimitRequestWithPerDescriptorHitsAddend( "domain", [][][2]string{ {{"key3", "value3"}}, {{"key3", "value3"}, {"subkey3", "subvalue3"}}, - }, 1) + }, []uint64{0, 1}) limits = []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, "", nil, false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key3_value3"), false, false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, sm.NewStats("key3_value3_subkey3_subvalue3"), false, false, false, "", nil, false), } assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -133,48 +136,50 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, }, cache.DoLimit(context.Background(), request, limits)) - assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) - assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(0), 
limits[0].Stats.WithinLimit.Value()) - assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.WithinLimit.Value()) } } -func testLocalCacheStats(localCacheStats gostats.StatGenerator, statsStore gostats.Store, sink *common.TestStatSink, - expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int, - expectedEntryCount int) func(*testing.T) { +func testLocalCacheStats(localCacheScopeName string, localCacheStats gostats.StatGenerator, statsStore gostats.Store, sink *common.TestStatSink, + expectedHitCount uint64, expectedMissCount uint64, expectedLookUpCount uint64, expectedExpiredCount uint64, + expectedEntryCount uint64, +) func(*testing.T) { return func(t *testing.T) { localCacheStats.GenerateStats() statsStore.Flush() + prefix := localCacheScopeName + "." // Check whether all local_cache related stats are available. 
- _, ok := sink.Record["averageAccessTime"] + _, ok := sink.Record[prefix+"averageAccessTime"] assert.Equal(t, true, ok) - hitCount, ok := sink.Record["hitCount"] + hitCount, ok := sink.Record[prefix+"hitCount"] assert.Equal(t, true, ok) - missCount, ok := sink.Record["missCount"] + missCount, ok := sink.Record[prefix+"missCount"] assert.Equal(t, true, ok) - lookupCount, ok := sink.Record["lookupCount"] + lookupCount, ok := sink.Record[prefix+"lookupCount"] assert.Equal(t, true, ok) - _, ok = sink.Record["overwriteCount"] + _, ok = sink.Record[prefix+"overwriteCount"] assert.Equal(t, true, ok) - _, ok = sink.Record["evacuateCount"] + _, ok = sink.Record[prefix+"evacuateCount"] assert.Equal(t, true, ok) - expiredCount, ok := sink.Record["expiredCount"] + expiredCount, ok := sink.Record[prefix+"expiredCount"] assert.Equal(t, true, ok) - entryCount, ok := sink.Record["entryCount"] + entryCount, ok := sink.Record[prefix+"entryCount"] assert.Equal(t, true, ok) // Check the correctness of hitCount, missCount, lookupCount, expiredCount and entryCount - assert.Equal(t, expectedHitCount, hitCount.(int)) - assert.Equal(t, expectedMissCount, missCount.(int)) - assert.Equal(t, expectedLookUpCount, lookupCount.(int)) - assert.Equal(t, expectedExpiredCount, expiredCount.(int)) - assert.Equal(t, expectedEntryCount, entryCount.(int)) + assert.Equal(t, expectedHitCount, hitCount.(uint64)) + assert.Equal(t, expectedMissCount, missCount.(uint64)) + assert.Equal(t, expectedLookUpCount, lookupCount.(uint64)) + assert.Equal(t, expectedExpiredCount, expiredCount.(uint64)) + assert.Equal(t, expectedEntryCount, entryCount.(uint64)) sink.Clear() } @@ -188,15 +193,17 @@ func TestOverLimitWithLocalCache(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sink := common.NewTestStatSink() + statsStore := gostats.NewStore(sink, 
false) sm := stats.NewMockStatManager(statsStore) cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm, false) - sink := &common.TestStatSink{} - localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + + localCacheScopeName := "localcache" + localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope(localCacheScopeName)) // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -204,7 +211,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, false, "", nil, false), } assert.Equal( @@ -219,11 +226,11 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value()) // Check the local cache stats. - testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) + t.Run("TestLocalCacheStats", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 0, 1, 1, 0, 0)) // Test Near Limit Stats. 
At Near Limit Ratio, still OK timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -240,11 +247,11 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) // Check the local cache stats. - testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) + t.Run("TestLocalCacheStats_1", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 0, 2, 2, 0, 0)) // Test Over limit stats timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(16)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -261,11 +268,11 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) // Check the local cache stats. 
- testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) + t.Run("TestLocalCacheStats_2", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 0, 3, 3, 0, 1)) // Test Over limit stats with local cache timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).Times(0) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) assert.Equal( @@ -280,7 +287,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) // Check the local cache stats. - testLocalCacheStats(localCacheStats, statsStore, sink, 1, 3, 4, 0, 1) + t.Run("TestLocalCacheStats_3", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 1, 3, 4, 0, 1)) } func TestNearLimit(t *testing.T) { @@ -296,7 +303,7 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -304,7 +311,7 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, false, "", nil, false), } assert.Equal( @@ -319,7 +326,7 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. At Near Limit Ratio, still OK timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -337,7 +344,7 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases // when we are near limit, not after we have passed the limit. 
timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(16)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -355,12 +362,12 @@ func TestNearLimit(t *testing.T) { // Now test hitsAddend that is greater than 1 // All of it under limit, under near limit timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_1234", uint32(3)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_1234", uint64(3)).SetArg(1, uint64(5)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key5_value5"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -372,12 +379,12 @@ func TestNearLimit(t *testing.T) { // All of it under limit, some over near limit 
timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key6_value6_1234", uint32(2)).SetArg(1, uint32(7)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key6_value6_1234", uint64(2)).SetArg(1, uint64(7)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key6_value6_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key6_value6"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -389,12 +396,12 @@ func TestNearLimit(t *testing.T) { // All of it under limit, all of it over near limit timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key7_value7_1234", uint32(3)).SetArg(1, uint32(19)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key7_value7_1234", uint64(3)).SetArg(1, uint64(19)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key7_value7_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, 
sm.NewStats("key7_value7"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key7_value7"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -406,12 +413,12 @@ func TestNearLimit(t *testing.T) { // Some of it over limit, all of it over near limit timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key8_value8_1234", uint32(3)).SetArg(1, uint32(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key8_value8_1234", uint64(3)).SetArg(1, uint64(22)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key8_value8_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key8_value8"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -423,12 +430,12 @@ func TestNearLimit(t *testing.T) { // Some of it in all three places timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key9_value9_1234", uint32(7)).SetArg(1, 
uint32(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key9_value9_1234", uint64(7)).SetArg(1, uint64(22)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key9_value9_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key9_value9"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -440,12 +447,12 @@ func TestNearLimit(t *testing.T) { // all of it over limit timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key10_value10_1234", uint32(3)).SetArg(1, uint32(30)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key10_value10_1234", uint64(3)).SetArg(1, uint64(30)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key10_value10_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, "", nil, false)} + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key10_value10"), false, false, 
false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -470,12 +477,12 @@ func TestRedisWithJitter(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) jitterSource.EXPECT().Int63().Return(int64(100)) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint64(1)).SetArg(1, uint64(5)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(101)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, false, "", nil, false)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}}, @@ -494,15 +501,17 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sink := common.NewTestStatSink() + statsStore := gostats.NewStore(sink, false) sm := stats.NewMockStatManager(statsStore) cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, 
rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm, false) - sink := &common.TestStatSink{} - localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + + localCacheScopeName := "localcache" + localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope(localCacheScopeName)) // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -510,7 +519,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true, "", nil, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, true, false, "", nil, false), } assert.Equal( @@ -525,11 +534,11 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.WithinLimit.Value()) // Check the local cache stats. - testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) + t.Run("TestLocalCacheStats", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 0, 1, 1, 0, 0)) // Test Near Limit Stats. 
At Near Limit Ratio, still OK timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -546,11 +555,11 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) // Check the local cache stats. - testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) + t.Run("TestLocalCacheStats_1", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 0, 2, 2, 0, 0)) // Test Over limit stats timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(16)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) @@ -569,11 +578,11 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) // Check the local cache stats. 
- testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) + t.Run("TestLocalCacheStats_2", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 0, 3, 3, 0, 1)) // Test Over limit stats with local cache timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).Times(0) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) @@ -593,7 +602,7 @@ func TestOverLimitWithLocalCacheShadowRule(t *testing.T) { assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) // Check the local cache stats. - testLocalCacheStats(localCacheStats, statsStore, sink, 1, 3, 4, 0, 1) + t.Run("TestLocalCacheStats_3", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 1, 3, 4, 0, 1)) } func TestRedisTracer(t *testing.T) { @@ -613,12 +622,12 @@ func TestRedisTracer(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint64(1)).SetArg(1, uint64(5)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, sm.NewStats("key_value"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, 
sm.NewStats("key_value"), false, false, false, "", nil, false)} cache.DoLimit(context.Background(), request, limits) spanStubs := testSpanExporter.GetSpans() @@ -635,31 +644,33 @@ func TestOverLimitWithStopCacheKeyIncrementWhenOverlimitConfig(t *testing.T) { client := mock_redis.NewMockClient(controller) timeSource := mock_utils.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - statsStore := gostats.NewStore(gostats.NewNullSink(), false) + sink := common.NewTestStatSink() + statsStore := gostats.NewStore(sink, false) sm := stats.NewMockStatManager(statsStore) cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "", sm, true) - sink := &common.TestStatSink{} - localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + + localCacheScopeName := "localcache" + localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope(localCacheScopeName)) // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint32(11)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint64(10)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint64(10)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint64(1)).SetArg(1, uint64(11)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil).Times(2) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}, {{"key5", "value5"}}}, 1) + request := common.NewRateLimitRequestWithPerDescriptorHitsAddend("domain", [][][2]string{{{"key4", "value4"}}, {{"key5", "value5"}}}, []uint64{1, 1}) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, "", nil, false), - 
config.NewRateLimit(14, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key5_value5"), false, false, "", nil, false), + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key4_value4"), false, false, false, "", nil, false), + config.NewRateLimit(14, pb.RateLimitResponse_RateLimit_HOUR, sm.NewStats("key5_value5"), false, false, false, "", nil, false), } assert.Equal( @@ -680,69 +691,71 @@ func TestOverLimitWithStopCacheKeyIncrementWhenOverlimitConfig(t *testing.T) { assert.Equal(uint64(1), limits[1].Stats.WithinLimit.Value()) // Check the local cache stats. - testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) + t.Run("TestLocalCacheStats", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 0, 2, 2, 0, 0)) - // Test Near Limit Stats. At Near Limit Ratio, still OK + // Test Near Limit Stats. Some hits at Near Limit Ratio, but still OK. timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint32(13)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint32(13)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint64(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint64(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(2)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) - 
client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint64(2)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil).Times(2) + request = common.NewRateLimitRequestWithPerDescriptorHitsAddend("domain", [][][2]string{{{"key4", "value4"}}, {{"key5", "value5"}}}, []uint64{2, 2}) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, }, cache.DoLimit(context.Background(), request, limits)) - assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(2), limits[0].Stats.WithinLimit.Value()) - assert.Equal(uint64(2), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(3), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(3), limits[1].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.OverLimitWithLocalCache.Value()) - assert.Equal(uint64(1), limits[1].Stats.NearLimit.Value()) - assert.Equal(uint64(2), limits[1].Stats.WithinLimit.Value()) + assert.Equal(uint64(2), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(3), 
limits[1].Stats.WithinLimit.Value()) // Check the local cache stats. - testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) + t.Run("TestLocalCacheStats_1", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 0, 4, 4, 0, 0)) // Test one key is reaching to the Overlimit threshold timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint32(14)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint32(14)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(0)).SetArg(1, uint32(14)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_997200").SetArg(1, uint64(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key5_value5_997200").SetArg(1, uint64(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint64(0)).SetArg(1, uint64(13)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint32(1)).SetArg(1, uint32(14)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_997200", uint64(2)).SetArg(1, uint64(15)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil).Times(2) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, 
LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(&limits[0].Limit.Unit, timeSource)}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(&limits[1].Limit.Unit, timeSource)}, }, cache.DoLimit(context.Background(), request, limits)) - assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(5), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) assert.Equal(uint64(2), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(3), limits[0].Stats.WithinLimit.Value()) - assert.Equal(uint64(3), limits[1].Stats.TotalHits.Value()) - assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(5), limits[0].Stats.WithinLimit.Value()) + assert.Equal(uint64(5), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.OverLimitWithLocalCache.Value()) - assert.Equal(uint64(2), limits[1].Stats.NearLimit.Value()) + assert.Equal(uint64(3), limits[1].Stats.NearLimit.Value()) assert.Equal(uint64(3), limits[1].Stats.WithinLimit.Value()) // Check the local cache stats. 
- testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) + t.Run("TestLocalCacheStats_2", testLocalCacheStats(localCacheScopeName, localCacheStats, statsStore, sink, 0, 6, 6, 0, 1)) } diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go index 9896b7987..899f56406 100644 --- a/test/server/server_impl_test.go +++ b/test/server/server_impl_test.go @@ -26,8 +26,8 @@ func assertHttpResponse(t *testing.T, requestBody string, expectedStatusCode int, expectedContentType string, - expectedResponseBody string) { - + expectedResponseBody string, +) { t.Helper() assert := assert.New(t) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 4d2cde532..f22309348 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -153,7 +153,7 @@ func TestService(test *testing.T) { request = common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, false, "key_name", nil, false), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -187,7 +187,7 @@ func TestService(test *testing.T) { // Config should still be valid. Also make sure order does not affect results. 
limits = []*config.RateLimit{ nil, - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, false, "", nil, false), } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -241,7 +241,7 @@ func TestServiceGlobalShadowMode(test *testing.T) { // Global Shadow mode limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, false, "", nil, false), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -281,8 +281,8 @@ func TestRuleShadowMode(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil, false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, false, "", nil, false), } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -314,8 
+314,8 @@ func TestMixedRuleShadowMode(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, "", nil, false), - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, true, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, false, "", nil, false), } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[1]).Return(limits[1]) @@ -374,7 +374,7 @@ func TestServiceWithCustomRatelimitHeaders(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, false, "", nil, false), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -427,7 +427,7 @@ func TestServiceWithDefaultRatelimitHeaders(test *testing.T) { request := common.NewRateLimitRequest( "different-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, 
t.statsManager.NewStats("key"), false, false, false, "", nil, false), nil, } t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) @@ -487,7 +487,7 @@ func TestCacheError(test *testing.T) { service := t.setupBasicService() request := common.NewRateLimitRequest("different-domain", [][][2]string{{{"foo", "bar"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, "", nil, false)} + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, false, "", nil, false)} t.config.EXPECT().GetLimit(context.Background(), "different-domain", request.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(context.Background(), request, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { @@ -529,9 +529,9 @@ func TestUnlimited(test *testing.T) { request := common.NewRateLimitRequest( "some-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}, {{"baz", "qux"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false, false, "", nil, false), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("foo_bar"), false, false, false, "", nil, false), nil, - config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false, "", nil, false), + config.NewRateLimit(55, pb.RateLimitResponse_RateLimit_SECOND, t.statsManager.NewStats("baz_qux"), true, false, false, "", nil, false), } t.config.EXPECT().GetLimit(context.Background(), "some-domain", request.Descriptors[0]).Return(limits[0]) t.config.EXPECT().GetLimit(context.Background(), "some-domain", request.Descriptors[1]).Return(limits[1]) @@ -665,3 +665,277 @@ func TestServiceHealthStatusAtLeastOneConfigLoaded(test 
*testing.T) { test.Errorf("expected status NOT_SERVING actual %v", res.Status) } } + +func TestServiceGlobalQuotaMode(test *testing.T) { + os.Setenv("QUOTA_MODE", "true") + defer func() { + os.Unsetenv("QUOTA_MODE") + }() + + t := commonSetup(test) + defer t.controller.Finish() + + // No global quota_mode, this should be picked-up from environment variables during re-load of config + service := t.setupBasicService() + + // Force a config reload. + barrier := newBarrier() + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent + barrier.wait() + + // Make a request. + request := common.NewRateLimitRequest( + "quota-domain", [][][2]string{{{"foo", "bar"}}, {{"hello", "world"}}}, 1) + + // Global Quota mode + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, false, "", nil, false), + config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key2"), false, false, false, "", nil, false), + } + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }) + response, err := service.ShouldRateLimit(context.Background(), request) + + // OK overall code even if limit response was OVER_LIMIT, because global quota mode is enabled + common.AssertProtoEqual( + t.assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: 
[]*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }, + }, + response) + t.assert.Nil(err) +} + +func TestServiceQuotaModeWithMetadata(test *testing.T) { + os.Setenv("QUOTA_MODE", "true") + os.Setenv("RESPONSE_DYNAMIC_METADATA", "true") + defer func() { + os.Unsetenv("QUOTA_MODE") + os.Unsetenv("RESPONSE_DYNAMIC_METADATA") + }() + + t := commonSetup(test) + defer t.controller.Finish() + + service := t.setupBasicService() + + // Force a config reload to pick up environment variables. + barrier := newBarrier() + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent + barrier.wait() + + // Make a request. + request := common.NewRateLimitRequest( + "quota-domain", [][][2]string{{{"regular", "limit"}}, {{"quota", "limit"}}}, 1) + + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key"), false, false, false, "", nil, false), + config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_MINUTE, t.statsManager.NewStats("key2"), false, false, true, "", nil, false), + } + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }) + response, err := service.ShouldRateLimit(context.Background(), request) + + // Verify response includes metadata about quota violations + 
t.assert.Nil(err) + t.assert.Equal(pb.RateLimitResponse_OK, response.OverallCode) + t.assert.NotNil(response.DynamicMetadata) + + // Check that quota violation is tracked in metadata for descriptor index 1 + quotaViolations := response.DynamicMetadata.Fields["quotaModeViolations"] + t.assert.NotNil(quotaViolations) + violations := quotaViolations.GetListValue() + t.assert.Len(violations.Values, 1) + t.assert.Equal(float64(1), violations.Values[0].GetNumberValue()) + + // Check that quotaModeEnabled is true + quotaModeEnabled := response.DynamicMetadata.Fields["quotaModeEnabled"] + t.assert.NotNil(quotaModeEnabled) + t.assert.True(quotaModeEnabled.GetBoolValue()) +} + +func TestServicePerDescriptorQuotaMode(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + + // No Global Quota mode + service := t.setupBasicService() + + request := common.NewRateLimitRequest( + "quota-domain", [][][2]string{{{"regular", "limit"}}, {{"quota", "limit"}}}, 1) + + // Create limits with one having quota mode enabled per-descriptor + limits := []*config.RateLimit{ + // Regular limit - should reject when exceeded + { + FullKey: "regular_limit", + Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: 5, Unit: pb.RateLimitResponse_RateLimit_MINUTE}, + QuotaMode: false, + ShadowMode: false, + }, + // Quota mode limit - should not reject when exceeded + { + FullKey: "quota_limit", + Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: 3, Unit: pb.RateLimitResponse_RateLimit_MINUTE}, + QuotaMode: true, + ShadowMode: false, + }, + } + + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: 
pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }) + response, err := service.ShouldRateLimit(context.Background(), request) + + // Regular limit should cause OVER_LIMIT overall, but quota mode limit should be converted to OK + common.AssertProtoEqual( + t.assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OVER_LIMIT, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }, + }, + response) + t.assert.Nil(err) +} + +func TestServiceQuotaModeOnly(test *testing.T) { + t := commonSetup(test) + defer t.controller.Finish() + + service := t.setupBasicService() + + request := common.NewRateLimitRequest( + "quota-domain", [][][2]string{{{"quota1", "limit"}}, {{"quota2", "limit"}}}, 1) + + // Both limits are in quota mode + limits := []*config.RateLimit{ + { + FullKey: "quota_limit_1", + Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: 5, Unit: pb.RateLimitResponse_RateLimit_MINUTE}, + QuotaMode: true, + ShadowMode: false, + }, + { + FullKey: "quota_limit_2", + Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: 3, Unit: pb.RateLimitResponse_RateLimit_MINUTE}, + QuotaMode: true, + ShadowMode: false, + }, + } + + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }) + response, err := service.ShouldRateLimit(context.Background(), request) + + // All quota mode limits 
should result in OK overall code + common.AssertProtoEqual( + t.assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }, + }, + response) + t.assert.Nil(err) +} + +func TestServiceQuotaModeWithShadowMode(test *testing.T) { + os.Setenv("SHADOW_MODE", "true") + defer func() { + os.Unsetenv("SHADOW_MODE") + }() + + t := commonSetup(test) + defer t.controller.Finish() + + service := t.setupBasicService() + + // Force a config reload to pick up environment variables. + barrier := newBarrier() + t.configUpdateEvent.EXPECT().GetConfig().DoAndReturn(func() (config.RateLimitConfig, any) { + barrier.signal() + return t.config, nil + }) + t.configUpdateEventChan <- t.configUpdateEvent + barrier.wait() + + request := common.NewRateLimitRequest( + "quota-domain", [][][2]string{{{"regular", "limit"}}, {{"quota", "limit"}}}, 1) + + // Mix of regular and quota mode limits with global shadow mode + limits := []*config.RateLimit{ + { + FullKey: "regular_limit", + Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: 5, Unit: pb.RateLimitResponse_RateLimit_MINUTE}, + QuotaMode: false, + ShadowMode: false, + }, + { + FullKey: "quota_limit", + Limit: &pb.RateLimitResponse_RateLimit{RequestsPerUnit: 3, Unit: pb.RateLimitResponse_RateLimit_MINUTE}, + QuotaMode: true, + ShadowMode: false, + }, + } + + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(context.Background(), "quota-domain", request.Descriptors[1]).Return(limits[1]) + t.cache.EXPECT().DoLimit(context.Background(), request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: 
pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }) + response, err := service.ShouldRateLimit(context.Background(), request) + + // Global shadow mode should override everything and result in OK + common.AssertProtoEqual( + t.assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 0}, + }, + }, + response) + t.assert.Nil(err) + + // Verify global shadow mode counter is incremented + t.assert.EqualValues(1, t.statStore.NewCounter("global_shadow_mode").Value()) +}