Add local rate limiting acceptance test #2932
@@ -0,0 +1,126 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package connect

import (
    "testing"
    "time"

    terratestK8s "github.com/gruntwork-io/terratest/modules/k8s"

    "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper"
    "github.com/hashicorp/consul-k8s/acceptance/framework/consul"
    "github.com/hashicorp/consul-k8s/acceptance/framework/environment"
    "github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
    "github.com/hashicorp/consul-k8s/acceptance/framework/k8s"
    "github.com/hashicorp/consul/sdk/testutil/retry"
    "github.com/stretchr/testify/require"
)

// TestConnectInject_LocalRateLimiting tests that local rate limiting works as expected between services.
func TestConnectInject_LocalRateLimiting(t *testing.T) {
    cfg := suite.Config()
    ctx := suite.Environment().DefaultContext(t)

    releaseName := helpers.RandomName()
    connHelper := connhelper.ConnectHelper{
        ClusterKind:     consul.Helm,
        Secure:          false,
        ReleaseName:     releaseName,
        Ctx:             ctx,
        UseAppNamespace: cfg.EnableRestrictedPSAEnforcement,
        Cfg:             cfg,
    }

    connHelper.Setup(t)
    connHelper.Install(t)
    connHelper.DeployClientAndServer(t)
    connHelper.TestConnectionSuccess(t, connhelper.ConnHelperOpts{})

    // Map the static-server URL and path to the rate limits defined in the service defaults at:
    // ../fixtures/cases/local-rate-limiting/service-defaults-static-server.yaml
    rateLimitMap := map[string]int{
        "http://localhost:1234":             2,
        "http://localhost:1234/exact":       3,
        "http://localhost:1234/prefix-test": 4,
        "http://localhost:1234/regex":       5,
    }

    opts := newRateLimitOptions(t, ctx)

    // Ensure that all requests from static-client to static-server succeed.
    for addr, rps := range rateLimitMap {
        opts.rps = rps
        assertRateLimits(t, opts, addr)
    }

    // Apply local rate limiting to the static-server
    writeCrd(t, connHelper, "../fixtures/cases/local-rate-limiting")

    // Ensure that going over the limit causes the static-server to apply rate limiting and
    // reply with 429 Too Many Requests
    opts.enforced = true
    for addr, reqPerSec := range rateLimitMap {
        opts.rps = reqPerSec
        assertRateLimits(t, opts, addr)
    }
}

func assertRateLimits(t *testing.T, opts *assertRateLimitOptions, curlArgs ...string) {
Review comment: Just an observation that I think this mostly just tests …

Reply: Yes, this test is bursty. It was pretty difficult to set up a reliable burst test, and with the current framework I think it would be even more difficult to reliably test throttling. We're using … I'm open to ideas if you have thoughts on how we could do this 🤔
    t.Helper()

    // Build the kubectl command; with the defaults from newRateLimitOptions this is roughly
    // `exec deploy/static-client -c static-client -- curl -vvvsSf <addr>`.
    args := []string{"exec", opts.resourceType + opts.sourceApp, "-c", opts.sourceApp, "--", "curl", opts.curlOpts}
    args = append(args, curlArgs...)

    // This check is time sensitive due to the nature of rate limiting.
    // Run the entire assertion in a retry block and on each pass:
    // 1. Send the exact number of requests that are allowed per the rate limiting configuration
    //    and check that all the requests succeed.
    // 2. Send an extra request that should exceed the configured rate limit and check that this request fails.
    // 3. Make sure that all requests happened within the rate limit enforcement window of one second.
    retry.RunWith(opts.retryTimer, t, func(r *retry.R) {
        // Make up to the allowed number of calls in a second
        t0 := time.Now()
        for i := 0; i < opts.rps; i++ {
            output, err := k8s.RunKubectlAndGetOutputE(t, opts.k8sOpts, args...)
            require.NoError(r, err)
            require.Contains(r, output, opts.successOutput)
        }

        // Exceed the configured rate limit.
        output, err := k8s.RunKubectlAndGetOutputE(t, opts.k8sOpts, args...)
        if opts.enforced {
            require.Error(r, err)
            require.Contains(r, output, opts.rateLimitOutput, "request was not rate limited")
        } else {
            require.NoError(r, err)
            require.Contains(r, output, opts.successOutput, "request was not successful")
        }
        require.True(r, time.Since(t0) < time.Second, "failed to make all requests within one second window")
    })
}
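The review thread above notes that this assertion mainly exercises burst behaviour rather than sustained throttling. As a standalone illustration only (not part of the PR, and only an approximation of Envoy's local rate limiting), the sketch below models the fixture's instanceLevel settings (requestsPerSecond: 2, requestsMaxBurst: 2) with a token bucket from golang.org/x/time/rate: a back-to-back burst of two requests is admitted and the third is rejected, which corresponds to the extra request that assertRateLimits expects to be answered with 429 once enforcement is on.

// Standalone sketch, not part of the PR: approximates the token-bucket
// behaviour discussed above using golang.org/x/time/rate.
package main

import (
    "fmt"

    "golang.org/x/time/rate"
)

func main() {
    // Mirrors instanceLevel requestsPerSecond: 2 / requestsMaxBurst: 2 from
    // service-defaults-static-server.yaml (assuming token-bucket semantics).
    limiter := rate.NewLimiter(rate.Limit(2), 2)

    // Two back-to-back requests fit within the burst; the third is rejected.
    for i := 1; i <= 3; i++ {
        fmt.Printf("request %d allowed: %v\n", i, limiter.Allow())
    }
}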

type assertRateLimitOptions struct {
    resourceType    string
    successOutput   string
    rateLimitOutput string
    k8sOpts         *terratestK8s.KubectlOptions
    sourceApp       string
    rps             int
    enforced        bool
    retryTimer      *retry.Timer
    curlOpts        string
}

func newRateLimitOptions(t *testing.T, ctx environment.TestContext) *assertRateLimitOptions {
    return &assertRateLimitOptions{
        resourceType:    "deploy/",
        successOutput:   "hello world",
        rateLimitOutput: "curl: (22) The requested URL returned error: 429",
Review comment: I might even consider changing this to just contain …

Reply: It's possible that this could change but I think it's unlikely 🤷. If curl changes its output format/messages we'll likely have to change things elsewhere. I followed what was used in consul-k8s/acceptance/framework/connhelper/connect_helper.go, lines 417 to 419 (commit 3d3bd70).
        k8sOpts:    ctx.KubectlOptions(t),
        sourceApp:  connhelper.StaticClientName,
        retryTimer: &retry.Timer{Timeout: 120 * time.Second, Wait: 2 * time.Second},
        // -f in the curl options makes curl exit with an error (22) on HTTP errors such as 429,
        // which is how assertRateLimits detects a rate-limited request.
        curlOpts: "-vvvsSf",
    }
}
../fixtures/cases/local-rate-limiting/service-defaults-static-server.yaml
@@ -0,0 +1,23 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0

apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceDefaults
metadata:
  name: static-server
  namespace: default
spec:
  protocol: http
  rateLimits:
    instanceLevel:
      requestsPerSecond: 2
      requestsMaxBurst: 2
      routes:
        - pathExact: "/exact"
          requestsPerSecond: 3
          requestsMaxBurst: 3
        - pathPrefix: "/prefix"
          requestsPerSecond: 4
        - pathRegex: "/regex"
          requestsPerSecond: 5
Review comment: Just a thought, but maybe it would be more straightforward to create an array of opts and add the addr/rate limit to each one? Would keep the configuration all in one place. Then you could just loop on the opts.

Reply: Yeah, that might be a nice update. My only concern with that is we would still have to do bits of the opts setup in the loop. I think I'll stick with the host/rps map for now.
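For reference, a rough sketch of the reviewer's array-of-opts idea, reusing the identifiers from the test above (t, ctx, newRateLimitOptions, assertRateLimits). It is not part of the PR; the cases table is invented for illustration, and, as the author notes, part of the opts setup would still happen inside the loop.

// Hypothetical restructuring based on the suggestion above; not part of the PR.
cases := []struct {
    addr string
    rps  int
}{
    {"http://localhost:1234", 2},
    {"http://localhost:1234/exact", 3},
    {"http://localhost:1234/prefix-test", 4},
    {"http://localhost:1234/regex", 5},
}

for _, tc := range cases {
    opts := newRateLimitOptions(t, ctx) // shared setup still happens per case
    opts.rps = tc.rps
    assertRateLimits(t, opts, tc.addr)
}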